drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hwmgr.h"
30#include "amd_powerplay.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "soc15_common.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_smumgr.h"
43#include "vega10_processpptables.h"
44#include "vega10_pptable.h"
45#include "vega10_thermal.h"
46#include "pp_debug.h"
47#include "amd_pcie_helpers.h"
48#include "ppinterrupt.h"
49#include "pp_overdriver.h"
50#include "pp_thermal.h"
51
52#include "smuio/smuio_9_0_offset.h"
53#include "smuio/smuio_9_0_sh_mask.h"
54
55#define HBM_MEMORY_CHANNEL_WIDTH 128
56
57static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
58
59#define mmDF_CS_AON0_DramBaseAddress0 0x0044
60#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
61
62//DF_CS_AON0_DramBaseAddress0
63#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
64#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
65#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
66#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
67#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
68#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
69#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
70#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
71#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
72#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
73
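/*
 * Illustrative sketch only (not part of the upstream driver): decoding the
 * number of interleaved HBM channels from DF_CS_AON0_DramBaseAddress0. The
 * backend-init code further down reads this register, extracts IntLvNumChan
 * and range-checks the result against channel_number[], which maps the
 * encoded field value to a channel count.
 */
#if 0	/* example only */
static uint32_t example_vega10_channel_count(uint32_t dram_base_address0)
{
	uint32_t idx = (dram_base_address0 &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

	/* e.g. idx == 3 selects 4 interleaved channels */
	return (idx < ARRAY_SIZE(channel_number)) ? channel_number[idx] : 0;
}
#endif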
74static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
75
76struct vega10_power_state *cast_phw_vega10_power_state(
77 struct pp_hw_power_state *hw_ps)
78{
79 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
80 "Invalid Powerstate Type!",
81 return NULL;);
82
83 return (struct vega10_power_state *)hw_ps;
84}
85
86const struct vega10_power_state *cast_const_phw_vega10_power_state(
87 const struct pp_hw_power_state *hw_ps)
88{
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
90 "Invalid Powerstate Type!",
91 return NULL;);
92
93 return (const struct vega10_power_state *)hw_ps;
94}
95
96static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
97{
98 struct vega10_hwmgr *data = hwmgr->backend;
99
100 data->registry_data.sclk_dpm_key_disabled =
101 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
102 data->registry_data.socclk_dpm_key_disabled =
103 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
104 data->registry_data.mclk_dpm_key_disabled =
105 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
106 data->registry_data.pcie_dpm_key_disabled =
107 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
108
109 data->registry_data.dcefclk_dpm_key_disabled =
110 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
111
112 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
113 data->registry_data.power_containment_support = 1;
114 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
115 data->registry_data.enable_tdc_limit_feature = 1;
116 }
117
118 data->registry_data.clock_stretcher_support =
119 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
120
121 data->registry_data.ulv_support =
122 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
123
124 data->registry_data.sclk_deep_sleep_support =
125 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
126
127 data->registry_data.disable_water_mark = 0;
128
129 data->registry_data.fan_control_support = 1;
130 data->registry_data.thermal_support = 1;
131 data->registry_data.fw_ctf_enabled = 1;
132
133 data->registry_data.avfs_support =
134 hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
135 data->registry_data.led_dpm_enabled = 1;
136
137 data->registry_data.vr0hot_enabled = 1;
138 data->registry_data.vr1hot_enabled = 1;
139 data->registry_data.regulator_hot_gpio_support = 1;
140
141 data->registry_data.didt_support = 1;
142 if (data->registry_data.didt_support) {
143 data->registry_data.didt_mode = 6;
144 data->registry_data.sq_ramping_support = 1;
145 data->registry_data.db_ramping_support = 0;
146 data->registry_data.td_ramping_support = 0;
147 data->registry_data.tcp_ramping_support = 0;
148 data->registry_data.dbr_ramping_support = 0;
149 data->registry_data.edc_didt_support = 1;
150 data->registry_data.gc_didt_support = 0;
151 data->registry_data.psm_didt_support = 0;
152 }
153
154 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
155 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
164 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
165 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167
168 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
169 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
170 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
171 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
172}
173
174static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
175{
176 struct vega10_hwmgr *data = hwmgr->backend;
177 struct phm_ppt_v2_information *table_info =
178 (struct phm_ppt_v2_information *)hwmgr->pptable;
179 struct amdgpu_device *adev = hwmgr->adev;
180
181 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_SclkDeepSleep);
183
184 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185 PHM_PlatformCaps_DynamicPatchPowerState);
186
187 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
188 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
189 PHM_PlatformCaps_ControlVDDCI);
190
191 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
192 PHM_PlatformCaps_EnableSMU7ThermalManagement);
193
194 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196 PHM_PlatformCaps_UVDPowerGating);
197
198 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_VCEPowerGating);
201
202 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_UnTabledHardwareInterface);
204
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_FanSpeedInTableIsRPM);
207
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_ODFuzzyFanControlSupport);
210
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_DynamicPowerManagement);
213
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_SMC);
216
217 /* power tune caps */
218 /* assume disabled */
219 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
220 PHM_PlatformCaps_PowerContainment);
221 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_DiDtSupport);
223 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 PHM_PlatformCaps_SQRamping);
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_DBRamping);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_TDRamping);
229 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_TCPRamping);
231 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_DBRRamping);
233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_DiDtEDCEnable);
235 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_GCEDC);
237 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_PSM);
239
240 if (data->registry_data.didt_support) {
241 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
242 if (data->registry_data.sq_ramping_support)
243 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
244 if (data->registry_data.db_ramping_support)
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
246 if (data->registry_data.td_ramping_support)
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
248 if (data->registry_data.tcp_ramping_support)
249 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
250 if (data->registry_data.dbr_ramping_support)
251 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
252 if (data->registry_data.edc_didt_support)
253 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
254 if (data->registry_data.gc_didt_support)
255 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
256 if (data->registry_data.psm_didt_support)
257 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
258 }
259
260 if (data->registry_data.power_containment_support)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_PowerContainment);
263 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
264 PHM_PlatformCaps_CAC);
265
266 if (table_info->tdp_table->usClockStretchAmount &&
267 data->registry_data.clock_stretcher_support)
268 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
269 PHM_PlatformCaps_ClockStretcher);
270
271 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
272 PHM_PlatformCaps_RegulatorHot);
273 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
274 PHM_PlatformCaps_AutomaticDCTransition);
275
276 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
277 PHM_PlatformCaps_UVDDPM);
278 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 PHM_PlatformCaps_VCEDPM);
280
281 return 0;
282}
283
284static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
285{
286 struct vega10_hwmgr *data = hwmgr->backend;
287 struct phm_ppt_v2_information *table_info =
288 (struct phm_ppt_v2_information *)(hwmgr->pptable);
289 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
290 struct vega10_odn_vddc_lookup_table *od_lookup_table;
291 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
292 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
293 struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
294 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
295 uint32_t i;
296 int result;
297
298 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
299 if (!result) {
300 data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
301 data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
302 }
303
304 od_lookup_table = &odn_table->vddc_lookup_table;
305 vddc_lookup_table = table_info->vddc_lookup_table;
306
307 for (i = 0; i < vddc_lookup_table->count; i++)
308 od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
309
310 od_lookup_table->count = vddc_lookup_table->count;
311
312 dep_table[0] = table_info->vdd_dep_on_sclk;
313 dep_table[1] = table_info->vdd_dep_on_mclk;
314 dep_table[2] = table_info->vdd_dep_on_socclk;
315 od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
316 od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
317 od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
318
319 for (i = 0; i < 3; i++)
320 smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
321
322 if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
323 odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
324 if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
325 odn_table->min_vddc = dep_table[0]->entries[0].vddc;
326
327 i = od_table[2]->count - 1;
328 od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
329 hwmgr->platform_descriptor.overdriveLimit.memoryClock :
330 od_table[2]->entries[i].clk;
331 od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
332 odn_table->max_vddc :
333 od_table[2]->entries[i].vddc;
334
335 return 0;
336}
337
338static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
339{
340 struct vega10_hwmgr *data = hwmgr->backend;
341 int i;
342 uint32_t sub_vendor_id, hw_revision;
343 struct amdgpu_device *adev = hwmgr->adev;
344
345 vega10_initialize_power_tune_defaults(hwmgr);
346
347 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
348 data->smu_features[i].smu_feature_id = 0xffff;
349 data->smu_features[i].smu_feature_bitmap = 1 << i;
350 data->smu_features[i].enabled = false;
351 data->smu_features[i].supported = false;
352 }
353
354 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
355 FEATURE_DPM_PREFETCHER_BIT;
356 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
357 FEATURE_DPM_GFXCLK_BIT;
358 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
359 FEATURE_DPM_UCLK_BIT;
360 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
361 FEATURE_DPM_SOCCLK_BIT;
362 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
363 FEATURE_DPM_UVD_BIT;
364 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
365 FEATURE_DPM_VCE_BIT;
366 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
367 FEATURE_DPM_MP0CLK_BIT;
368 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
369 FEATURE_DPM_LINK_BIT;
370 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
371 FEATURE_DPM_DCEFCLK_BIT;
372 data->smu_features[GNLD_ULV].smu_feature_id =
373 FEATURE_ULV_BIT;
374 data->smu_features[GNLD_AVFS].smu_feature_id =
375 FEATURE_AVFS_BIT;
376 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
377 FEATURE_DS_GFXCLK_BIT;
378 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
379 FEATURE_DS_SOCCLK_BIT;
380 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
381 FEATURE_DS_LCLK_BIT;
382 data->smu_features[GNLD_PPT].smu_feature_id =
383 FEATURE_PPT_BIT;
384 data->smu_features[GNLD_TDC].smu_feature_id =
385 FEATURE_TDC_BIT;
386 data->smu_features[GNLD_THERMAL].smu_feature_id =
387 FEATURE_THERMAL_BIT;
388 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
389 FEATURE_GFX_PER_CU_CG_BIT;
390 data->smu_features[GNLD_RM].smu_feature_id =
391 FEATURE_RM_BIT;
392 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
393 FEATURE_DS_DCEFCLK_BIT;
394 data->smu_features[GNLD_ACDC].smu_feature_id =
395 FEATURE_ACDC_BIT;
396 data->smu_features[GNLD_VR0HOT].smu_feature_id =
397 FEATURE_VR0HOT_BIT;
398 data->smu_features[GNLD_VR1HOT].smu_feature_id =
399 FEATURE_VR1HOT_BIT;
400 data->smu_features[GNLD_FW_CTF].smu_feature_id =
401 FEATURE_FW_CTF_BIT;
402 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
403 FEATURE_LED_DISPLAY_BIT;
404 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
405 FEATURE_FAN_CONTROL_BIT;
406 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
407 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
408 data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
409
410 if (!data->registry_data.prefetcher_dpm_key_disabled)
411 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
412
413 if (!data->registry_data.sclk_dpm_key_disabled)
414 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
415
416 if (!data->registry_data.mclk_dpm_key_disabled)
417 data->smu_features[GNLD_DPM_UCLK].supported = true;
418
419 if (!data->registry_data.socclk_dpm_key_disabled)
420 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
421
422 if (PP_CAP(PHM_PlatformCaps_UVDDPM))
423 data->smu_features[GNLD_DPM_UVD].supported = true;
424
425 if (PP_CAP(PHM_PlatformCaps_VCEDPM))
426 data->smu_features[GNLD_DPM_VCE].supported = true;
427
428 if (!data->registry_data.pcie_dpm_key_disabled)
429 data->smu_features[GNLD_DPM_LINK].supported = true;
430
431 if (!data->registry_data.dcefclk_dpm_key_disabled)
432 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
433
434 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
435 data->registry_data.sclk_deep_sleep_support) {
436 data->smu_features[GNLD_DS_GFXCLK].supported = true;
437 data->smu_features[GNLD_DS_SOCCLK].supported = true;
438 data->smu_features[GNLD_DS_LCLK].supported = true;
439 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
440 }
441
442 if (data->registry_data.enable_pkg_pwr_tracking_feature)
443 data->smu_features[GNLD_PPT].supported = true;
444
445 if (data->registry_data.enable_tdc_limit_feature)
446 data->smu_features[GNLD_TDC].supported = true;
447
448 if (data->registry_data.thermal_support)
449 data->smu_features[GNLD_THERMAL].supported = true;
450
451 if (data->registry_data.fan_control_support)
452 data->smu_features[GNLD_FAN_CONTROL].supported = true;
453
454 if (data->registry_data.fw_ctf_enabled)
455 data->smu_features[GNLD_FW_CTF].supported = true;
456
457 if (data->registry_data.avfs_support)
458 data->smu_features[GNLD_AVFS].supported = true;
459
460 if (data->registry_data.led_dpm_enabled)
461 data->smu_features[GNLD_LED_DISPLAY].supported = true;
462
463 if (data->registry_data.vr1hot_enabled)
464 data->smu_features[GNLD_VR1HOT].supported = true;
465
466 if (data->registry_data.vr0hot_enabled)
467 data->smu_features[GNLD_VR0HOT].supported = true;
468
469 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
470 hwmgr->smu_version = smum_get_argument(hwmgr);
471 /* ACG firmware has major version 5 */
472 if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
473 data->smu_features[GNLD_ACG].supported = true;
474 if (data->registry_data.didt_support)
475 data->smu_features[GNLD_DIDT].supported = true;
476
477 hw_revision = adev->pdev->revision;
478 sub_vendor_id = adev->pdev->subsystem_vendor;
479
480 if ((hwmgr->chip_id == 0x6862 ||
481 hwmgr->chip_id == 0x6861 ||
482 hwmgr->chip_id == 0x6868) &&
483 (hw_revision == 0) &&
484 (sub_vendor_id != 0x1002))
485 data->smu_features[GNLD_PCC_LIMIT].supported = true;
486}
487
488#ifdef PPLIB_VEGA10_EVV_SUPPORT
489static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
490 phm_ppt_v1_voltage_lookup_table *lookup_table,
491 uint16_t virtual_voltage_id, int32_t *socclk)
492{
493 uint8_t entry_id;
494 uint8_t voltage_id;
495 struct phm_ppt_v2_information *table_info =
496 (struct phm_ppt_v2_information *)(hwmgr->pptable);
497
498 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
499 "Lookup table is empty",
500 return -EINVAL);
501
502 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
503 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
504 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
505 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
506 break;
507 }
508
509 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
510 "Can't find requested voltage id in vdd_dep_on_socclk table!",
511 return -EINVAL);
512
513 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
514
515 return 0;
516}
517
518#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
519/**
520* Get Leakage VDDC based on leakage ID.
521*
522* @param hwmgr the address of the powerplay hardware manager.
523* @return always 0.
524*/
525static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
526{
527 struct vega10_hwmgr *data = hwmgr->backend;
528 uint16_t vv_id;
529 uint32_t vddc = 0;
530 uint16_t i, j;
531 uint32_t sclk = 0;
532 struct phm_ppt_v2_information *table_info =
533 (struct phm_ppt_v2_information *)hwmgr->pptable;
534 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
535 table_info->vdd_dep_on_socclk;
536 int result;
537
538 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
539 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
540
541 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
542 table_info->vddc_lookup_table, vv_id, &sclk)) {
543 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
544 for (j = 1; j < socclk_table->count; j++) {
545 if (socclk_table->entries[j].clk == sclk &&
546 socclk_table->entries[j].cks_enable == 0) {
547 sclk += 5000;
548 break;
549 }
550 }
551 }
552
553 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
554 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
555 "Error retrieving EVV voltage value!",
556 continue);
557
558
559 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
560 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
561 "Invalid VDDC value", result = -EINVAL;);
562
563 /* the voltage should not be zero nor equal to leakage ID */
564 if (vddc != 0 && vddc != vv_id) {
565 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
566 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
567 data->vddc_leakage.count++;
568 }
569 }
570 }
571
572 return 0;
573}
574
575/**
576 * Change virtual leakage voltage to actual value.
577 *
578 * @param hwmgr the address of the powerplay hardware manager.
579 * @param pointer to changing voltage
580 * @param pointer to leakage table
581 */
582static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
583 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
584{
585 uint32_t index;
586
587 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
588 for (index = 0; index < leakage_table->count; index++) {
589 /* if this voltage matches a leakage voltage ID */
590 /* patch with actual leakage voltage */
591 if (leakage_table->leakage_id[index] == *voltage) {
592 *voltage = leakage_table->actual_voltage[index];
593 break;
594 }
595 }
596
597 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
598 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
599}
600
601/**
602* Patch voltage lookup table by EVV leakages.
603*
604* @param hwmgr the address of the powerplay hardware manager.
605* @param pointer to voltage lookup table
606* @param pointer to leakage table
607* @return always 0
608*/
609static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
610 phm_ppt_v1_voltage_lookup_table *lookup_table,
611 struct vega10_leakage_voltage *leakage_table)
612{
613 uint32_t i;
614
615 for (i = 0; i < lookup_table->count; i++)
616 vega10_patch_with_vdd_leakage(hwmgr,
617 &lookup_table->entries[i].us_vdd, leakage_table);
618
619 return 0;
620}
621
622static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
623 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
624 uint16_t *vddc)
625{
626 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
627
628 return 0;
629}
630#endif
631
632static int vega10_patch_voltage_dependency_tables_with_lookup_table(
633 struct pp_hwmgr *hwmgr)
634{
635 uint8_t entry_id, voltage_id;
636 unsigned i;
637 struct phm_ppt_v2_information *table_info =
638 (struct phm_ppt_v2_information *)(hwmgr->pptable);
639 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
640 table_info->mm_dep_table;
641 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
642 table_info->vdd_dep_on_mclk;
643
644 for (i = 0; i < 6; i++) {
645 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
646 switch (i) {
647 case 0: vdt = table_info->vdd_dep_on_socclk; break;
648 case 1: vdt = table_info->vdd_dep_on_sclk; break;
649 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
650 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
651 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
652 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
653 }
654
655 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
656 voltage_id = vdt->entries[entry_id].vddInd;
657 vdt->entries[entry_id].vddc =
658 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
659 }
660 }
661
662 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
663 voltage_id = mm_table->entries[entry_id].vddcInd;
664 mm_table->entries[entry_id].vddc =
665 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
666 }
667
668 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
669 voltage_id = mclk_table->entries[entry_id].vddInd;
670 mclk_table->entries[entry_id].vddc =
671 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
672 voltage_id = mclk_table->entries[entry_id].vddciInd;
673 mclk_table->entries[entry_id].vddci =
674 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
675 voltage_id = mclk_table->entries[entry_id].mvddInd;
676 mclk_table->entries[entry_id].mvdd =
677 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
678 }
679
680
681 return 0;
682
683}
684
685static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
686 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
687{
688 uint32_t table_size, i, j;
689 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
690
691 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
692 "Lookup table is empty", return -EINVAL);
693
694 table_size = lookup_table->count;
695
696 /* Sorting voltages */
697 for (i = 0; i < table_size - 1; i++) {
698 for (j = i + 1; j > 0; j--) {
699 if (lookup_table->entries[j].us_vdd <
700 lookup_table->entries[j - 1].us_vdd) {
701 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
702 lookup_table->entries[j - 1] = lookup_table->entries[j];
703 lookup_table->entries[j] = tmp_voltage_lookup_record;
704 }
705 }
706 }
707
708 return 0;
709}
710
711static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
712{
713 int result = 0;
714 int tmp_result;
715 struct phm_ppt_v2_information *table_info =
716 (struct phm_ppt_v2_information *)(hwmgr->pptable);
717#ifdef PPLIB_VEGA10_EVV_SUPPORT
718 struct vega10_hwmgr *data = hwmgr->backend;
719
720 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
721 table_info->vddc_lookup_table, &(data->vddc_leakage));
722 if (tmp_result)
723 result = tmp_result;
724
725 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
726 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
727 if (tmp_result)
728 result = tmp_result;
729#endif
730
731 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
732 if (tmp_result)
733 result = tmp_result;
734
735 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
736 if (tmp_result)
737 result = tmp_result;
738
739 return result;
740}
741
742static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
743{
744 struct phm_ppt_v2_information *table_info =
745 (struct phm_ppt_v2_information *)(hwmgr->pptable);
746 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
747 table_info->vdd_dep_on_socclk;
748 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
749 table_info->vdd_dep_on_mclk;
750
751 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
752 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
753 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
754 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
755
756 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
757 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
758 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
759 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
760
761 table_info->max_clock_voltage_on_ac.sclk =
762 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
763 table_info->max_clock_voltage_on_ac.mclk =
764 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
765 table_info->max_clock_voltage_on_ac.vddc =
766 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
767 table_info->max_clock_voltage_on_ac.vddci =
768 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
769
770 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
771 table_info->max_clock_voltage_on_ac.sclk;
772 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
773 table_info->max_clock_voltage_on_ac.mclk;
774 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
775 table_info->max_clock_voltage_on_ac.vddc;
776 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
777 table_info->max_clock_voltage_on_ac.vddci;
778
779 return 0;
780}
781
782static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
783{
784 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
785 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
786
787 kfree(hwmgr->backend);
788 hwmgr->backend = NULL;
789
790 return 0;
791}
792
793static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
794{
795 int result = 0;
796 struct vega10_hwmgr *data;
797 uint32_t config_telemetry = 0;
798 struct pp_atomfwctrl_voltage_table vol_table;
799 struct amdgpu_device *adev = hwmgr->adev;
800
801 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
802 if (data == NULL)
803 return -ENOMEM;
804
805 hwmgr->backend = data;
806
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
810
811 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff;
813
814 /* need to set voltage control types before EVV patching */
815 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
816 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
817 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
818
819 /* VDDCR_SOC */
820 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
821 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
822 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
823 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
824 &vol_table)) {
825 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
826 (vol_table.telemetry_offset & 0xff);
827 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
828 }
829 } else {
830 kfree(hwmgr->backend);
831 hwmgr->backend = NULL;
832 PP_ASSERT_WITH_CODE(false,
833 "VDDCR_SOC is not SVID2!",
834 return -1);
835 }
836
837 /* MVDDC */
838 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
839 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
840 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
841 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
842 &vol_table)) {
843 config_telemetry |=
844 ((vol_table.telemetry_slope << 24) & 0xff000000) |
845 ((vol_table.telemetry_offset << 16) & 0xff0000);
846 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
847 }
848 }
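	/*
	 * Note derived from the code above: config_telemetry packs both SVI2
	 * telemetry settings into a single dword:
	 *   bits  7:0  - VDDCR_SOC telemetry offset
	 *   bits 15:8  - VDDCR_SOC telemetry slope
	 *   bits 23:16 - MVDDC telemetry offset
	 *   bits 31:24 - MVDDC telemetry slope
	 */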
849
850 /* VDDCI_MEM */
851 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
852 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
853 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
854 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
855 }
856
857 data->config_telemetry = config_telemetry;
858
859 vega10_set_features_platform_caps(hwmgr);
860
861 vega10_init_dpm_defaults(hwmgr);
862
863#ifdef PPLIB_VEGA10_EVV_SUPPORT
864 /* Get leakage voltage based on leakage ID. */
865 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
866 "Get EVV Voltage Failed. Abort Driver loading!",
867 return -1);
868#endif
869
870 /* Patch our voltage dependency table with actual leakage voltage
871 * We need to perform leakage translation before it's used by other functions
872 */
873 vega10_complete_dependency_tables(hwmgr);
874
875 /* Parse pptable data read from VBIOS */
876 vega10_set_private_data_based_on_pptable(hwmgr);
877
878 data->is_tlu_enabled = false;
879
880 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
881 VEGA10_MAX_HARDWARE_POWERLEVELS;
882 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
883 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
884
885 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
886 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
887 hwmgr->platform_descriptor.clockStep.engineClock = 500;
888 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
889
890 data->total_active_cus = adev->gfx.cu_info.number;
891 /* Setup default Overdrive Fan control settings */
892 data->odn_fan_table.target_fan_speed =
893 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
894 data->odn_fan_table.target_temperature =
895 hwmgr->thermal_controller.
896 advanceFanControlParameters.ucTargetTemperature;
897 data->odn_fan_table.min_performance_clock =
898 hwmgr->thermal_controller.advanceFanControlParameters.
899 ulMinFanSCLKAcousticLimit;
900 data->odn_fan_table.min_fan_limit =
901 hwmgr->thermal_controller.
902 advanceFanControlParameters.usFanPWMMinLimit *
903 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
904
905 data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
906 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
907 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
908 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
909 "Mem Channel Index Exceeded maximum!",
910 return -EINVAL);
911
912 return result;
913}
914
915static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
916{
917 struct vega10_hwmgr *data = hwmgr->backend;
918
919 data->low_sclk_interrupt_threshold = 0;
920
921 return 0;
922}
923
924static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
925{
926 struct vega10_hwmgr *data = hwmgr->backend;
927 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
928
929 struct pp_atomfwctrl_voltage_table table;
930 uint8_t i, j;
931 uint32_t mask = 0;
932 uint32_t tmp;
933 int32_t ret = 0;
934
935 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
936 VOLTAGE_OBJ_GPIO_LUT, &table);
937
938 if (!ret) {
939 tmp = table.mask_low;
940 for (i = 0, j = 0; i < 32; i++) {
941 if (tmp & 1) {
942 mask |= (uint32_t)(i << (8 * j));
943 if (++j >= 3)
944 break;
945 }
946 tmp >>= 1;
947 }
948 }
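	/*
	 * The loop above packs the GPIO numbers of the first three set bits
	 * in mask_low into bytes 0..2 of 'mask'. For example (illustrative
	 * values only), mask_low = 0x00080401 (bits 0, 10 and 19 set) yields
	 * mask = 0x00130A00, i.e. LedPin0 = 0, LedPin1 = 10, LedPin2 = 19.
	 */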
949
950 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
951 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
952 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
953 return 0;
954}
955
956static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
957{
958 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
959 "Failed to init sclk threshold!",
960 return -EINVAL);
961
962 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
963 "Failed to set up led dpm config!",
964 return -EINVAL);
965
966 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
967
968 return 0;
969}
970
971/**
972* Remove repeated voltage values and create table with unique values.
973*
974* @param hwmgr the address of the powerplay hardware manager.
975* @param vol_table the pointer to changing voltage table
976* @return 0 in success
977*/
978
979static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
980 struct pp_atomfwctrl_voltage_table *vol_table)
981{
982 uint32_t i, j;
983 uint16_t vvalue;
984 bool found = false;
985 struct pp_atomfwctrl_voltage_table *table;
986
987 PP_ASSERT_WITH_CODE(vol_table,
988 "Voltage Table empty.", return -EINVAL);
989 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
990 GFP_KERNEL);
991
992 if (!table)
993 return -ENOMEM;
994
995 table->mask_low = vol_table->mask_low;
996 table->phase_delay = vol_table->phase_delay;
997
998 for (i = 0; i < vol_table->count; i++) {
999 vvalue = vol_table->entries[i].value;
1000 found = false;
1001
1002 for (j = 0; j < table->count; j++) {
1003 if (vvalue == table->entries[j].value) {
1004 found = true;
1005 break;
1006 }
1007 }
1008
1009 if (!found) {
1010 table->entries[table->count].value = vvalue;
1011 table->entries[table->count].smio_low =
1012 vol_table->entries[i].smio_low;
1013 table->count++;
1014 }
1015 }
1016
1017 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1018 kfree(table);
1019
1020 return 0;
1021}
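/*
 * Example (illustrative values only): an input table with entries
 * {900, 900, 950, 1000, 950} collapses to {900, 950, 1000} - only the first
 * occurrence of each voltage value (with its smio_low) is kept.
 */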
1022
1023static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1024 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1025 struct pp_atomfwctrl_voltage_table *vol_table)
1026{
1027 int i;
1028
1029 PP_ASSERT_WITH_CODE(dep_table->count,
1030 "Voltage Dependency Table empty.",
1031 return -EINVAL);
1032
1033 vol_table->mask_low = 0;
1034 vol_table->phase_delay = 0;
1035 vol_table->count = dep_table->count;
1036
1037 for (i = 0; i < vol_table->count; i++) {
1038 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1039 vol_table->entries[i].smio_low = 0;
1040 }
1041
1042 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1043 vol_table),
1044 "Failed to trim MVDD Table!",
1045 return -1);
1046
1047 return 0;
1048}
1049
1050static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1051 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1052 struct pp_atomfwctrl_voltage_table *vol_table)
1053{
1054 uint32_t i;
1055
1056 PP_ASSERT_WITH_CODE(dep_table->count,
1057 "Voltage Dependency Table empty.",
1058 return -EINVAL);
1059
1060 vol_table->mask_low = 0;
1061 vol_table->phase_delay = 0;
1062 vol_table->count = dep_table->count;
1063
1064 for (i = 0; i < dep_table->count; i++) {
1065 vol_table->entries[i].value = dep_table->entries[i].vddci;
1066 vol_table->entries[i].smio_low = 0;
1067 }
1068
1069 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1070 "Failed to trim VDDCI table.",
1071 return -1);
1072
1073 return 0;
1074}
1075
1076static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1077 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1078 struct pp_atomfwctrl_voltage_table *vol_table)
1079{
1080 int i;
1081
1082 PP_ASSERT_WITH_CODE(dep_table->count,
1083 "Voltage Dependency Table empty.",
1084 return -EINVAL);
1085
1086 vol_table->mask_low = 0;
1087 vol_table->phase_delay = 0;
1088 vol_table->count = dep_table->count;
1089
1090 for (i = 0; i < vol_table->count; i++) {
1091 vol_table->entries[i].value = dep_table->entries[i].vddc;
1092 vol_table->entries[i].smio_low = 0;
1093 }
1094
1095 return 0;
1096}
1097
1098/* ---- Voltage Tables ----
1099 * If the voltage table would be bigger than
1100 * what will fit into the state table on
1101 * the SMC keep only the higher entries.
1102 */
1103static void vega10_trim_voltage_table_to_fit_state_table(
1104 struct pp_hwmgr *hwmgr,
1105 uint32_t max_vol_steps,
1106 struct pp_atomfwctrl_voltage_table *vol_table)
1107{
1108 unsigned int i, diff;
1109
1110 if (vol_table->count <= max_vol_steps)
1111 return;
1112
1113 diff = vol_table->count - max_vol_steps;
1114
1115 for (i = 0; i < max_vol_steps; i++)
1116 vol_table->entries[i] = vol_table->entries[i + diff];
1117
1118 vol_table->count = max_vol_steps;
1119}
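/*
 * Example (illustrative): with vol_table->count == 20 and max_vol_steps == 16,
 * diff is 4, so entries [4..19] are shifted down to [0..15] and count becomes
 * 16 - the four lowest entries are dropped, keeping the higher ones as the
 * comment above describes.
 */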
1120
1121/**
1122* Create Voltage Tables.
1123*
1124* @param hwmgr the address of the powerplay hardware manager.
1125* @return always 0
1126*/
1127static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1128{
1129 struct vega10_hwmgr *data = hwmgr->backend;
1130 struct phm_ppt_v2_information *table_info =
1131 (struct phm_ppt_v2_information *)hwmgr->pptable;
1132 int result;
1133
1134 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1135 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1136 result = vega10_get_mvdd_voltage_table(hwmgr,
1137 table_info->vdd_dep_on_mclk,
1138 &(data->mvdd_voltage_table));
1139 PP_ASSERT_WITH_CODE(!result,
1140 "Failed to retrieve MVDDC table!",
1141 return result);
1142 }
1143
1144 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1145 result = vega10_get_vddci_voltage_table(hwmgr,
1146 table_info->vdd_dep_on_mclk,
1147 &(data->vddci_voltage_table));
1148 PP_ASSERT_WITH_CODE(!result,
1149 "Failed to retrieve VDDCI_MEM table!",
1150 return result);
1151 }
1152
1153 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1154 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1155 result = vega10_get_vdd_voltage_table(hwmgr,
1156 table_info->vdd_dep_on_sclk,
1157 &(data->vddc_voltage_table));
1158 PP_ASSERT_WITH_CODE(!result,
1159 "Failed to retrieve VDDCR_SOC table!",
1160 return result);
1161 }
1162
1163 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1164 "Too many voltage values for VDDC. Trimming to fit state table.",
1165 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1166 16, &(data->vddc_voltage_table)));
1167
1168 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1169 "Too many voltage values for VDDCI. Trimming to fit state table.",
1170 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1171 16, &(data->vddci_voltage_table)));
1172
1173 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1174 "Too many voltage values for MVDD. Trimming to fit state table.",
1175 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1176 16, &(data->mvdd_voltage_table)));
1177
1178
1179 return 0;
1180}
1181
1182/*
1183 * @fn vega10_init_dpm_state
1184 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1185 *
1186 * @param dpm_state - the address of the DPM Table to initialize.
1187 * @return None.
1188 */
1189static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1190{
1191 dpm_state->soft_min_level = 0xff;
1192 dpm_state->soft_max_level = 0xff;
1193 dpm_state->hard_min_level = 0xff;
1194 dpm_state->hard_max_level = 0xff;
1195}
1196
1197static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1198 struct vega10_single_dpm_table *dpm_table,
1199 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1200{
1201 int i;
1202
1203 dpm_table->count = 0;
1204
1205 for (i = 0; i < dep_table->count; i++) {
1206 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1207 dep_table->entries[i].clk) {
1208 dpm_table->dpm_levels[dpm_table->count].value =
1209 dep_table->entries[i].clk;
1210 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1211 dpm_table->count++;
1212 }
1213 }
1214}
1215static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1216{
1217 struct vega10_hwmgr *data = hwmgr->backend;
1218 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1219 struct phm_ppt_v2_information *table_info =
1220 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1221 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1222 table_info->pcie_table;
1223 uint32_t i;
1224
1225 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1226 "Incorrect number of PCIE States from VBIOS!",
1227 return -1);
1228
1229 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1230 if (data->registry_data.pcieSpeedOverride)
1231 pcie_table->pcie_gen[i] =
1232 data->registry_data.pcieSpeedOverride;
1233 else
1234 pcie_table->pcie_gen[i] =
1235 bios_pcie_table->entries[i].gen_speed;
1236
1237 if (data->registry_data.pcieLaneOverride)
1238 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1239 data->registry_data.pcieLaneOverride);
1240 else
1241 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1242 bios_pcie_table->entries[i].lane_width);
1243 if (data->registry_data.pcieClockOverride)
1244 pcie_table->lclk[i] =
1245 data->registry_data.pcieClockOverride;
1246 else
1247 pcie_table->lclk[i] =
1248 bios_pcie_table->entries[i].pcie_sclk;
1249 }
1250
1251 pcie_table->count = NUM_LINK_LEVELS;
1252
1253 return 0;
1254}
1255
1256/*
1257 * This function is to initialize all DPM state tables
1258 * for SMU based on the dependency table.
1259 * Dynamic state patching function will then trim these
1260 * state tables to the allowed range based
1261 * on the power policy or external client requests,
1262 * such as UVD request, etc.
1263 */
1264static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1265{
1266 struct vega10_hwmgr *data = hwmgr->backend;
1267 struct phm_ppt_v2_information *table_info =
1268 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1269 struct vega10_single_dpm_table *dpm_table;
1270 uint32_t i;
1271
1272 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1273 table_info->vdd_dep_on_socclk;
1274 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1275 table_info->vdd_dep_on_sclk;
1276 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1277 table_info->vdd_dep_on_mclk;
1278 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1279 table_info->mm_dep_table;
1280 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1281 table_info->vdd_dep_on_dcefclk;
1282 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1283 table_info->vdd_dep_on_pixclk;
1284 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1285 table_info->vdd_dep_on_dispclk;
1286 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1287 table_info->vdd_dep_on_phyclk;
1288
1289 PP_ASSERT_WITH_CODE(dep_soc_table,
1290 "SOCCLK dependency table is missing. This table is mandatory",
1291 return -EINVAL);
1292 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1293 "SOCCLK dependency table is empty. This table is mandatory",
1294 return -EINVAL);
1295
1296 PP_ASSERT_WITH_CODE(dep_gfx_table,
1297 "GFXCLK dependency table is missing. This table is mandatory",
1298 return -EINVAL);
1299 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1300 "GFXCLK dependency table is empty. This table is mandatory",
1301 return -EINVAL);
1302
1303 PP_ASSERT_WITH_CODE(dep_mclk_table,
1304 "MCLK dependency table is missing. This table is mandatory",
1305 return -EINVAL);
1306 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1307 "MCLK dependency table is empty. This table is mandatory",
1308 return -EINVAL);
1309
1310 /* Initialize Sclk DPM table based on allowed Sclk values */
1311 dpm_table = &(data->dpm_table.soc_table);
1312 vega10_setup_default_single_dpm_table(hwmgr,
1313 dpm_table,
1314 dep_soc_table);
1315
1316 vega10_init_dpm_state(&(dpm_table->dpm_state));
1317
1318 dpm_table = &(data->dpm_table.gfx_table);
1319 vega10_setup_default_single_dpm_table(hwmgr,
1320 dpm_table,
1321 dep_gfx_table);
1322 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
1323 hwmgr->platform_descriptor.overdriveLimit.engineClock =
1324 dpm_table->dpm_levels[dpm_table->count-1].value;
1325 vega10_init_dpm_state(&(dpm_table->dpm_state));
1326
1327 /* Initialize Mclk DPM table based on allowed Mclk values */
1328 data->dpm_table.mem_table.count = 0;
1329 dpm_table = &(data->dpm_table.mem_table);
1330 vega10_setup_default_single_dpm_table(hwmgr,
1331 dpm_table,
1332 dep_mclk_table);
1333 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
1334 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1335 dpm_table->dpm_levels[dpm_table->count-1].value;
1336 vega10_init_dpm_state(&(dpm_table->dpm_state));
1337
1338 data->dpm_table.eclk_table.count = 0;
1339 dpm_table = &(data->dpm_table.eclk_table);
1340 for (i = 0; i < dep_mm_table->count; i++) {
1341 if (i == 0 || dpm_table->dpm_levels
1342 [dpm_table->count - 1].value <=
1343 dep_mm_table->entries[i].eclk) {
1344 dpm_table->dpm_levels[dpm_table->count].value =
1345 dep_mm_table->entries[i].eclk;
1346 dpm_table->dpm_levels[dpm_table->count].enabled =
1347 (i == 0) ? true : false;
1348 dpm_table->count++;
1349 }
1350 }
1351 vega10_init_dpm_state(&(dpm_table->dpm_state));
1352
1353 data->dpm_table.vclk_table.count = 0;
1354 data->dpm_table.dclk_table.count = 0;
1355 dpm_table = &(data->dpm_table.vclk_table);
1356 for (i = 0; i < dep_mm_table->count; i++) {
1357 if (i == 0 || dpm_table->dpm_levels
1358 [dpm_table->count - 1].value <=
1359 dep_mm_table->entries[i].vclk) {
1360 dpm_table->dpm_levels[dpm_table->count].value =
1361 dep_mm_table->entries[i].vclk;
1362 dpm_table->dpm_levels[dpm_table->count].enabled =
1363 (i == 0) ? true : false;
1364 dpm_table->count++;
1365 }
1366 }
1367 vega10_init_dpm_state(&(dpm_table->dpm_state));
1368
1369 dpm_table = &(data->dpm_table.dclk_table);
1370 for (i = 0; i < dep_mm_table->count; i++) {
1371 if (i == 0 || dpm_table->dpm_levels
1372 [dpm_table->count - 1].value <=
1373 dep_mm_table->entries[i].dclk) {
1374 dpm_table->dpm_levels[dpm_table->count].value =
1375 dep_mm_table->entries[i].dclk;
1376 dpm_table->dpm_levels[dpm_table->count].enabled =
1377 (i == 0) ? true : false;
1378 dpm_table->count++;
1379 }
1380 }
1381 vega10_init_dpm_state(&(dpm_table->dpm_state));
1382
1383 /* Assume there is no headless Vega10 for now */
1384 dpm_table = &(data->dpm_table.dcef_table);
1385 vega10_setup_default_single_dpm_table(hwmgr,
1386 dpm_table,
1387 dep_dcef_table);
1388
1389 vega10_init_dpm_state(&(dpm_table->dpm_state));
1390
1391 dpm_table = &(data->dpm_table.pixel_table);
1392 vega10_setup_default_single_dpm_table(hwmgr,
1393 dpm_table,
1394 dep_pix_table);
1395
1396 vega10_init_dpm_state(&(dpm_table->dpm_state));
1397
1398 dpm_table = &(data->dpm_table.display_table);
1399 vega10_setup_default_single_dpm_table(hwmgr,
1400 dpm_table,
1401 dep_disp_table);
1402
1403 vega10_init_dpm_state(&(dpm_table->dpm_state));
1404
1405 dpm_table = &(data->dpm_table.phy_table);
1406 vega10_setup_default_single_dpm_table(hwmgr,
1407 dpm_table,
1408 dep_phy_table);
1409
1410 vega10_init_dpm_state(&(dpm_table->dpm_state));
1411
1412 vega10_setup_default_pcie_table(hwmgr);
1413
1414 /* save a copy of the default DPM table */
1415 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1416 sizeof(struct vega10_dpm_table));
1417
1418 return 0;
1419}
1420
1421/*
1422 * @fn vega10_populate_ulv_state
1423 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1424 *
1425 * @param hwmgr - the address of the hardware manager.
1426 * @return Always 0.
1427 */
1428static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1429{
1430 struct vega10_hwmgr *data = hwmgr->backend;
1431 struct phm_ppt_v2_information *table_info =
1432 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1433
1434 data->smc_state_table.pp_table.UlvOffsetVid =
1435 (uint8_t)table_info->us_ulv_voltage_offset;
1436
1437 data->smc_state_table.pp_table.UlvSmnclkDid =
1438 (uint8_t)(table_info->us_ulv_smnclk_did);
1439 data->smc_state_table.pp_table.UlvMp1clkDid =
1440 (uint8_t)(table_info->us_ulv_mp1clk_did);
1441 data->smc_state_table.pp_table.UlvGfxclkBypass =
1442 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1443 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1444 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1445 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1446 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1447
1448 return 0;
1449}
1450
1451static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1452 uint32_t lclock, uint8_t *curr_lclk_did)
1453{
1454 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1455
1456 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1457 hwmgr,
1458 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1459 lclock, &dividers),
1460 "Failed to get LCLK clock settings from VBIOS!",
1461 return -1);
1462
1463 *curr_lclk_did = dividers.ulDid;
1464
1465 return 0;
1466}
1467
1468static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1469{
1470 int result = -1;
1471 struct vega10_hwmgr *data = hwmgr->backend;
1472 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1473 struct vega10_pcie_table *pcie_table =
1474 &(data->dpm_table.pcie_table);
1475 uint32_t i, j;
1476
1477 for (i = 0; i < pcie_table->count; i++) {
1478 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1479 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1480
1481 result = vega10_populate_single_lclk_level(hwmgr,
1482 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1483 if (result) {
1484 pr_info("Populate LClock Level %d Failed!\n", i);
1485 return result;
1486 }
1487 }
1488
1489 j = i - 1;
1490 while (i < NUM_LINK_LEVELS) {
1491 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1492 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1493
1494 result = vega10_populate_single_lclk_level(hwmgr,
1495 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1496 if (result) {
1497 pr_info("Populate LClock Level %d Failed!\n", i);
1498 return result;
1499 }
1500 i++;
1501 }
1502
1503 return result;
1504}
1505
1506/**
1507* Populates single SMC GFXSCLK structure using the provided engine clock
1508*
1509* @param hwmgr the address of the hardware manager
1510* @param gfx_clock the GFX clock to use to populate the structure.
1511* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1512*/
1513
1514static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1515 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1516 uint32_t *acg_freq)
1517{
1518 struct phm_ppt_v2_information *table_info =
1519 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1520 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
1521 struct vega10_hwmgr *data = hwmgr->backend;
1522 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1523 uint32_t gfx_max_clock =
1524 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1525 uint32_t i = 0;
1526
1527 if (hwmgr->od_enabled)
1528 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1529 &(data->odn_dpm_table.vdd_dep_on_sclk);
1530 else
1531 dep_on_sclk = table_info->vdd_dep_on_sclk;
1532
1533 PP_ASSERT_WITH_CODE(dep_on_sclk,
1534 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1535 return -EINVAL);
1536
dd4e2237
EH
1537 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1538 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1539 else {
1540 for (i = 0; i < dep_on_sclk->count; i++) {
1541 if (dep_on_sclk->entries[i].clk == gfx_clock)
1542 break;
1543 }
1544 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1545 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1546 return -EINVAL);
f83a9991
EH
1547 }
1548
f83a9991
EH
1549 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1550 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1551 gfx_clock, &dividers),
1552 "Failed to get GFX Clock settings from VBIOS!",
1553 return -EINVAL);
1554
1555 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1556 current_gfxclk_level->FbMult =
1557 cpu_to_le32(dividers.ulPll_fb_mult);
1558 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
93480f89 1559 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
f83a9991
EH
1560 current_gfxclk_level->SsFbMult =
1561 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1562 current_gfxclk_level->SsSlewFrac =
1563 cpu_to_le16(dividers.usPll_ss_slew_frac);
1564 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1565
84d43463
EQ
1566 	*acg_freq = gfx_clock / 100; /* convert from 10 kHz units to MHz */
1567
f83a9991
EH
1568 return 0;
1569}
1570
1571/**
1572 * @brief Populates single SMC SOCCLK structure using the provided clock.
1573 *
1574 * @param hwmgr - the address of the hardware manager.
1575 * @param soc_clock - the SOC clock to use to populate the structure.
1576 * @param current_soc_did - location in PPTable for the SOCCLK divider ID; current_vol_index receives the matching voltage index.
1577 * @return 0 on success.
1578 */
1579static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1580 uint32_t soc_clock, uint8_t *current_soc_did,
1581 uint8_t *current_vol_index)
1582{
c5a44849 1583 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1584 struct phm_ppt_v2_information *table_info =
1585 (struct phm_ppt_v2_information *)(hwmgr->pptable);
c5a44849 1586 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
f83a9991
EH
1587 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1588 uint32_t i;
1589
c5a44849
RZ
1590 if (hwmgr->od_enabled) {
1591 dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1592 &data->odn_dpm_table.vdd_dep_on_socclk;
1593 for (i = 0; i < dep_on_soc->count; i++) {
1594 if (dep_on_soc->entries[i].clk >= soc_clock)
1595 break;
1596 }
1597 } else {
1598 dep_on_soc = table_info->vdd_dep_on_socclk;
1599 for (i = 0; i < dep_on_soc->count; i++) {
1600 if (dep_on_soc->entries[i].clk == soc_clock)
1601 break;
1602 }
f83a9991 1603 }
c5a44849 1604
f83a9991
EH
1605 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1606 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1607 return -EINVAL);
c5a44849 1608
f83a9991
EH
1609 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1610 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1611 soc_clock, &dividers),
1612 "Failed to get SOC Clock settings from VBIOS!",
1613 return -EINVAL);
1614
1615 *current_soc_did = (uint8_t)dividers.ulDid;
1616 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
f83a9991
EH
1617 return 0;
1618}
1619
1620/**
1621* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1622*
1623* @param hwmgr the address of the hardware manager
1624*/
1625static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1626{
690dc626 1627 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1628 struct phm_ppt_v2_information *table_info =
1629 (struct phm_ppt_v2_information *)(hwmgr->pptable);
f83a9991
EH
1630 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1631 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1632 int result = 0;
1633 uint32_t i, j;
1634
1635 for (i = 0; i < dpm_table->count; i++) {
1636 result = vega10_populate_single_gfx_level(hwmgr,
1637 dpm_table->dpm_levels[i].value,
84d43463
EQ
1638 &(pp_table->GfxclkLevel[i]),
1639 &(pp_table->AcgFreqTable[i]));
f83a9991
EH
1640 if (result)
1641 return result;
1642 }
1643
1644 j = i - 1;
1645 while (i < NUM_GFXCLK_DPM_LEVELS) {
1646 result = vega10_populate_single_gfx_level(hwmgr,
1647 dpm_table->dpm_levels[j].value,
84d43463
EQ
1648 &(pp_table->GfxclkLevel[i]),
1649 &(pp_table->AcgFreqTable[i]));
f83a9991
EH
1650 if (result)
1651 return result;
1652 i++;
1653 }
1654
1655 pp_table->GfxclkSlewRate =
1656 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1657
1658 dpm_table = &(data->dpm_table.soc_table);
1659 for (i = 0; i < dpm_table->count; i++) {
f83a9991
EH
1660 result = vega10_populate_single_soc_level(hwmgr,
1661 dpm_table->dpm_levels[i].value,
1662 &(pp_table->SocclkDid[i]),
1663 &(pp_table->SocDpmVoltageIndex[i]));
1664 if (result)
1665 return result;
1666 }
1667
1668 j = i - 1;
1669 while (i < NUM_SOCCLK_DPM_LEVELS) {
f83a9991
EH
1670 result = vega10_populate_single_soc_level(hwmgr,
1671 dpm_table->dpm_levels[j].value,
1672 &(pp_table->SocclkDid[i]),
1673 &(pp_table->SocDpmVoltageIndex[i]));
1674 if (result)
1675 return result;
1676 i++;
1677 }
1678
1679 return result;
1680}
1681
c5a44849
RZ
1682static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1683{
1684 struct vega10_hwmgr *data = hwmgr->backend;
1685 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1686 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1687 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1688
1689 uint8_t soc_vid = 0;
1690 uint32_t i, max_vddc_level;
1691
1692 if (hwmgr->od_enabled)
1693 vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1694 else
1695 vddc_lookup_table = table_info->vddc_lookup_table;
1696
1697 max_vddc_level = vddc_lookup_table->count;
1698 for (i = 0; i < max_vddc_level; i++) {
1699 soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1700 pp_table->SocVid[i] = soc_vid;
1701 }
1702 while (i < MAX_REGULAR_DPM_NUMBER) {
1703 pp_table->SocVid[i] = soc_vid;
1704 i++;
1705 }
1706}
1707
f83a9991
EH
1708/**
1709 * @brief Populates a single SMC UCLK (memory clock) structure using the provided clock.
1710 *
1711 * @param hwmgr - the address of the hardware manager.
1712 * @param mem_clock - the memory clock to use to populate the structure.
1713 * @return 0 on success.
1714 */
1715static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1716 uint32_t mem_clock, uint8_t *current_mem_vid,
1717 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1718{
690dc626 1719 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1720 struct phm_ppt_v2_information *table_info =
1721 (struct phm_ppt_v2_information *)(hwmgr->pptable);
c5a44849 1722 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
f83a9991 1723 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
dd4e2237
EH
1724 uint32_t mem_max_clock =
1725 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1726 uint32_t i = 0;
f83a9991 1727
c5a44849 1728 if (hwmgr->od_enabled)
f83a9991 1729 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
c5a44849
RZ
1730 &data->odn_dpm_table.vdd_dep_on_mclk;
1731 else
1732 dep_on_mclk = table_info->vdd_dep_on_mclk;
f83a9991
EH
1733
1734 PP_ASSERT_WITH_CODE(dep_on_mclk,
1735 "Invalid SOC_VDD-UCLK Dependency Table!",
1736 return -EINVAL);
1737
c5a44849 1738 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
dd4e2237 1739 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
c5a44849 1740 } else {
dd4e2237
EH
1741 for (i = 0; i < dep_on_mclk->count; i++) {
1742 if (dep_on_mclk->entries[i].clk == mem_clock)
1743 break;
1744 }
1745 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1746 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1747 return -EINVAL);
f83a9991
EH
1748 }
1749
f83a9991
EH
1750 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1751 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1752 "Failed to get UCLK settings from VBIOS!",
1753 return -1);
1754
1755 *current_mem_vid =
1756 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1757 *current_mem_soc_vind =
1758 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1759 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1760 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1761
1762 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1763 "Invalid Divider ID!",
1764 return -EINVAL);
1765
1766 return 0;
1767}
1768
1769/**
1770 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1771 *
1772 * @param hwmgr - the address of the hardware manager.
1773 * @return 0 on success.
1774 */
1775static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1776{
690dc626 1777 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1778 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1779 struct vega10_single_dpm_table *dpm_table =
1780 &(data->dpm_table.mem_table);
1781 int result = 0;
451cc55d 1782 uint32_t i, j;
f83a9991
EH
1783
1784 for (i = 0; i < dpm_table->count; i++) {
1785 result = vega10_populate_single_memory_level(hwmgr,
1786 dpm_table->dpm_levels[i].value,
1787 &(pp_table->MemVid[i]),
1788 &(pp_table->UclkLevel[i]),
1789 &(pp_table->MemSocVoltageIndex[i]));
1790 if (result)
1791 return result;
1792 }
1793
1794 j = i - 1;
1795 while (i < NUM_UCLK_DPM_LEVELS) {
1796 result = vega10_populate_single_memory_level(hwmgr,
1797 dpm_table->dpm_levels[j].value,
1798 &(pp_table->MemVid[i]),
1799 &(pp_table->UclkLevel[i]),
1800 &(pp_table->MemSocVoltageIndex[i]));
1801 if (result)
1802 return result;
1803 i++;
1804 }
1805
451cc55d 1806 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
f83a9991 1807 pp_table->MemoryChannelWidth =
451cc55d
RZ
1808 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1809 channel_number[data->mem_channels]);
f83a9991
EH
1810
1811 pp_table->LowestUclkReservedForUlv =
1812 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1813
1814 return result;
1815}
1816
1817static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1818 DSPCLK_e disp_clock)
1819{
690dc626 1820 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1821 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1822 struct phm_ppt_v2_information *table_info =
1823 (struct phm_ppt_v2_information *)
1824 (hwmgr->pptable);
1825 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1826 uint32_t i;
1827 uint16_t clk = 0, vddc = 0;
1828 uint8_t vid = 0;
1829
1830 switch (disp_clock) {
1831 case DSPCLK_DCEFCLK:
1832 dep_table = table_info->vdd_dep_on_dcefclk;
1833 break;
1834 case DSPCLK_DISPCLK:
1835 dep_table = table_info->vdd_dep_on_dispclk;
1836 break;
1837 case DSPCLK_PIXCLK:
1838 dep_table = table_info->vdd_dep_on_pixclk;
1839 break;
1840 case DSPCLK_PHYCLK:
1841 dep_table = table_info->vdd_dep_on_phyclk;
1842 break;
1843 default:
1844 return -1;
1845 }
1846
1847 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1848 "Number Of Entries Exceeded maximum!",
1849 return -1);
1850
1851 for (i = 0; i < dep_table->count; i++) {
1852 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1853 vddc = table_info->vddc_lookup_table->
1854 entries[dep_table->entries[i].vddInd].us_vdd;
1855 vid = (uint8_t)convert_to_vid(vddc);
1856 pp_table->DisplayClockTable[disp_clock][i].Freq =
1857 cpu_to_le16(clk);
1858 pp_table->DisplayClockTable[disp_clock][i].Vid =
1859 cpu_to_le16(vid);
1860 }
1861
1862 while (i < NUM_DSPCLK_LEVELS) {
1863 pp_table->DisplayClockTable[disp_clock][i].Freq =
1864 cpu_to_le16(clk);
1865 pp_table->DisplayClockTable[disp_clock][i].Vid =
1866 cpu_to_le16(vid);
1867 i++;
1868 }
1869
1870 return 0;
1871}
1872
1873static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1874{
1875 uint32_t i;
1876
1877 for (i = 0; i < DSPCLK_COUNT; i++) {
1878 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1879 "Failed to populate Clock in DisplayClockTable!",
1880 return -1);
1881 }
1882
1883 return 0;
1884}
1885
1886static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1887 uint32_t eclock, uint8_t *current_eclk_did,
1888 uint8_t *current_soc_vol)
1889{
1890 struct phm_ppt_v2_information *table_info =
1891 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1892 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1893 table_info->mm_dep_table;
1894 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1895 uint32_t i;
1896
1897 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1898 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1899 eclock, &dividers),
1900 "Failed to get ECLK clock settings from VBIOS!",
1901 return -1);
1902
1903 *current_eclk_did = (uint8_t)dividers.ulDid;
1904
1905 for (i = 0; i < dep_table->count; i++) {
1906 if (dep_table->entries[i].eclk == eclock)
1907 *current_soc_vol = dep_table->entries[i].vddcInd;
1908 }
1909
1910 return 0;
1911}
1912
1913static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1914{
690dc626 1915 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1916 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1917 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1918 int result = -EINVAL;
1919 uint32_t i, j;
1920
1921 for (i = 0; i < dpm_table->count; i++) {
1922 result = vega10_populate_single_eclock_level(hwmgr,
1923 dpm_table->dpm_levels[i].value,
1924 &(pp_table->EclkDid[i]),
1925 &(pp_table->VceDpmVoltageIndex[i]));
1926 if (result)
1927 return result;
1928 }
1929
1930 j = i - 1;
1931 while (i < NUM_VCE_DPM_LEVELS) {
1932 result = vega10_populate_single_eclock_level(hwmgr,
1933 dpm_table->dpm_levels[j].value,
1934 &(pp_table->EclkDid[i]),
1935 &(pp_table->VceDpmVoltageIndex[i]));
1936 if (result)
1937 return result;
1938 i++;
1939 }
1940
1941 return result;
1942}
1943
1944static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1945 uint32_t vclock, uint8_t *current_vclk_did)
1946{
1947 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1948
1949 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1950 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1951 vclock, &dividers),
1952 "Failed to get VCLK clock settings from VBIOS!",
1953 return -EINVAL);
1954
1955 *current_vclk_did = (uint8_t)dividers.ulDid;
1956
1957 return 0;
1958}
1959
1960static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1961 uint32_t dclock, uint8_t *current_dclk_did)
1962{
1963 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1964
1965 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1966 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1967 dclock, &dividers),
1968 "Failed to get DCLK clock settings from VBIOS!",
1969 return -EINVAL);
1970
1971 *current_dclk_did = (uint8_t)dividers.ulDid;
1972
1973 return 0;
1974}
1975
1976static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1977{
690dc626 1978 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
1979 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1980 struct vega10_single_dpm_table *vclk_dpm_table =
1981 &(data->dpm_table.vclk_table);
1982 struct vega10_single_dpm_table *dclk_dpm_table =
1983 &(data->dpm_table.dclk_table);
1984 struct phm_ppt_v2_information *table_info =
1985 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1986 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1987 table_info->mm_dep_table;
1988 int result = -EINVAL;
1989 uint32_t i, j;
1990
1991 for (i = 0; i < vclk_dpm_table->count; i++) {
1992 result = vega10_populate_single_vclock_level(hwmgr,
1993 vclk_dpm_table->dpm_levels[i].value,
1994 &(pp_table->VclkDid[i]));
1995 if (result)
1996 return result;
1997 }
1998
1999 j = i - 1;
2000 while (i < NUM_UVD_DPM_LEVELS) {
2001 result = vega10_populate_single_vclock_level(hwmgr,
2002 vclk_dpm_table->dpm_levels[j].value,
2003 &(pp_table->VclkDid[i]));
2004 if (result)
2005 return result;
2006 i++;
2007 }
2008
2009 for (i = 0; i < dclk_dpm_table->count; i++) {
2010 result = vega10_populate_single_dclock_level(hwmgr,
2011 dclk_dpm_table->dpm_levels[i].value,
2012 &(pp_table->DclkDid[i]));
2013 if (result)
2014 return result;
2015 }
2016
2017 j = i - 1;
2018 while (i < NUM_UVD_DPM_LEVELS) {
2019 result = vega10_populate_single_dclock_level(hwmgr,
2020 dclk_dpm_table->dpm_levels[j].value,
2021 &(pp_table->DclkDid[i]));
2022 if (result)
2023 return result;
2024 i++;
2025 }
2026
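	/* The MM dependency table is expected to line up one-to-one with the VCLK/DCLK DPM levels; bail out if any entry disagrees */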
2027 for (i = 0; i < dep_table->count; i++) {
2028 if (dep_table->entries[i].vclk ==
2029 vclk_dpm_table->dpm_levels[i].value &&
2030 dep_table->entries[i].dclk ==
2031 dclk_dpm_table->dpm_levels[i].value)
2032 pp_table->UvdDpmVoltageIndex[i] =
2033 dep_table->entries[i].vddcInd;
2034 else
2035 return -1;
2036 }
2037
2038 j = i - 1;
2039 while (i < NUM_UVD_DPM_LEVELS) {
2040 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2041 i++;
2042 }
2043
2044 return 0;
2045}
2046
2047static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2048{
690dc626 2049 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2050 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2051 struct phm_ppt_v2_information *table_info =
2052 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2053 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2054 table_info->vdd_dep_on_sclk;
2055 uint32_t i;
2056
afc0255c 2057 for (i = 0; i < dep_table->count; i++) {
f83a9991 2058 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
afc0255c
RZ
2059 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2060 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
f83a9991
EH
2061 }
2062
2063 return 0;
2064}
2065
2066static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2067{
690dc626 2068 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2069 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2070 struct phm_ppt_v2_information *table_info =
2071 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2072 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2073 table_info->vdd_dep_on_sclk;
2074 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2075 int result = 0;
2076 uint32_t i;
2077
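	/* Fallback VID bounds, overwritten below when AVFS data can be read from the VBIOS */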
2078 pp_table->MinVoltageVid = (uint8_t)0xff;
2079 pp_table->MaxVoltageVid = (uint8_t)0;
2080
2081 if (data->smu_features[GNLD_AVFS].supported) {
2082 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2083 if (!result) {
2084 pp_table->MinVoltageVid = (uint8_t)
f83a9991 2085 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
6524e494
RZ
2086 pp_table->MaxVoltageVid = (uint8_t)
2087 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2088
2089 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2090 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2091 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2092 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2093 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2094 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2095 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
f83a9991
EH
2096
2097 pp_table->BtcGbVdroopTableCksOff.a0 =
2098 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
6524e494 2099 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
f83a9991
EH
2100 pp_table->BtcGbVdroopTableCksOff.a1 =
2101 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
6524e494 2102 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
f83a9991
EH
2103 pp_table->BtcGbVdroopTableCksOff.a2 =
2104 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
6524e494
RZ
2105 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2106
2107 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2108 pp_table->BtcGbVdroopTableCksOn.a0 =
2109 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2110 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2111 pp_table->BtcGbVdroopTableCksOn.a1 =
2112 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2113 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2114 pp_table->BtcGbVdroopTableCksOn.a2 =
2115 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2116 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
f83a9991
EH
2117
2118 pp_table->AvfsGbCksOn.m1 =
2119 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2120 pp_table->AvfsGbCksOn.m2 =
040cd2d1 2121 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
f83a9991
EH
2122 pp_table->AvfsGbCksOn.b =
2123 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2124 pp_table->AvfsGbCksOn.m1_shift = 24;
2125 pp_table->AvfsGbCksOn.m2_shift = 12;
6524e494 2126 pp_table->AvfsGbCksOn.b_shift = 0;
f83a9991 2127
6524e494
RZ
2128 pp_table->OverrideAvfsGbCksOn =
2129 avfs_params.ucEnableGbFuseTableCkson;
f83a9991
EH
2130 pp_table->AvfsGbCksOff.m1 =
2131 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2132 pp_table->AvfsGbCksOff.m2 =
040cd2d1 2133 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
f83a9991
EH
2134 pp_table->AvfsGbCksOff.b =
2135 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2136 pp_table->AvfsGbCksOff.m1_shift = 24;
2137 pp_table->AvfsGbCksOff.m2_shift = 12;
6524e494
RZ
2138 pp_table->AvfsGbCksOff.b_shift = 0;
2139
16d6e962
EH
2140 for (i = 0; i < dep_table->count; i++)
2141 pp_table->StaticVoltageOffsetVid[i] =
2142 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
f83a9991
EH
2143
2144 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2145 data->disp_clk_quad_eqn_a) &&
2146 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2147 data->disp_clk_quad_eqn_b)) {
2148 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2149 (int32_t)data->disp_clk_quad_eqn_a;
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2151 (int32_t)data->disp_clk_quad_eqn_b;
f83a9991
EH
2152 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2153 (int32_t)data->disp_clk_quad_eqn_c;
2154 } else {
2155 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2156 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2158 (int32_t)avfs_params.ulDispclk2GfxclkM2;
f83a9991
EH
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2160 (int32_t)avfs_params.ulDispclk2GfxclkB;
2161 }
2162
2163 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
4bae05e1 2165 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
f83a9991
EH
2166
2167 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2168 data->dcef_clk_quad_eqn_a) &&
2169 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2170 data->dcef_clk_quad_eqn_b)) {
2171 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2172 (int32_t)data->dcef_clk_quad_eqn_a;
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2174 (int32_t)data->dcef_clk_quad_eqn_b;
f83a9991
EH
2175 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2176 (int32_t)data->dcef_clk_quad_eqn_c;
2177 } else {
2178 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2179 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2181 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
f83a9991
EH
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2183 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2184 }
2185
2186 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
4bae05e1 2188 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
f83a9991
EH
2189
2190 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2191 data->pixel_clk_quad_eqn_a) &&
2192 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2193 data->pixel_clk_quad_eqn_b)) {
2194 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2195 (int32_t)data->pixel_clk_quad_eqn_a;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2197 (int32_t)data->pixel_clk_quad_eqn_b;
f83a9991
EH
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2199 (int32_t)data->pixel_clk_quad_eqn_c;
2200 } else {
2201 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2202 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2203 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2204 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
f83a9991
EH
2205 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2206 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2207 }
2208
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
4bae05e1 2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
f83a9991
EH
2212 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2213 data->phy_clk_quad_eqn_a) &&
2214 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2215 data->phy_clk_quad_eqn_b)) {
2216 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2217 (int32_t)data->phy_clk_quad_eqn_a;
2218 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2219 (int32_t)data->phy_clk_quad_eqn_b;
f83a9991
EH
2220 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2221 (int32_t)data->phy_clk_quad_eqn_c;
2222 } else {
2223 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2224 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2225 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2226 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
f83a9991
EH
2227 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2228 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2229 }
2230
2231 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2232 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
4bae05e1 2233 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
bdb8cd10
RZ
2234
2235 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2236 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2237 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2238 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2239 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2240 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2241
2242 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2243 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2244 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2245 pp_table->AcgAvfsGb.m1_shift = 0;
2246 pp_table->AcgAvfsGb.m2_shift = 0;
2247 pp_table->AcgAvfsGb.b_shift = 0;
2248
f83a9991
EH
2249 } else {
2250 data->smu_features[GNLD_AVFS].supported = false;
2251 }
2252 }
2253
2254 return 0;
2255}
2256
bdb8cd10
RZ
2257static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2258{
690dc626 2259 struct vega10_hwmgr *data = hwmgr->backend;
bdb8cd10
RZ
2260 	uint32_t acg_btc_response;
2261
2262 if (data->smu_features[GNLD_ACG].supported) {
d3f8c0ab 2263 if (0 == vega10_enable_smc_features(hwmgr, true,
bdb8cd10
RZ
2264 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2265 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2266
d3f8c0ab 2267 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
bdb8cd10 2268
d3f8c0ab 2269 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
3f9ca14a 2270 		acg_btc_response = smum_get_argument(hwmgr);
bdb8cd10
RZ
2271
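		/* The SMC returns 1 when the ACG BTC run succeeded; any other value is treated as a failure */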
2272 		if (1 == acg_btc_response) {
2273 if (1 == data->acg_loop_state)
d3f8c0ab 2274 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
bdb8cd10 2275 else if (2 == data->acg_loop_state)
d3f8c0ab
RZ
2276 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2277 if (0 == vega10_enable_smc_features(hwmgr, true,
bdb8cd10
RZ
2278 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2279 data->smu_features[GNLD_ACG].enabled = true;
2280 } else {
2281 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2282 data->smu_features[GNLD_ACG].enabled = false;
2283 }
2284 }
2285
2286 return 0;
2287}
2288
2289static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2290{
690dc626 2291 struct vega10_hwmgr *data = hwmgr->backend;
bdb8cd10 2292
06474d56
TSD
2293 if (data->smu_features[GNLD_ACG].supported &&
2294 data->smu_features[GNLD_ACG].enabled)
d3f8c0ab 2295 if (!vega10_enable_smc_features(hwmgr, false,
06474d56 2296 data->smu_features[GNLD_ACG].smu_feature_bitmap))
bdb8cd10 2297 data->smu_features[GNLD_ACG].enabled = false;
bdb8cd10
RZ
2298
2299 return 0;
2300}
2301
f83a9991
EH
2302static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2303{
690dc626 2304 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2305 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2306 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2307 int result;
2308
2309 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2310 if (!result) {
dd5a6fe2
TSD
2311 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2312 data->registry_data.regulator_hot_gpio_support) {
f83a9991
EH
2313 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2314 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2315 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2316 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2317 } else {
2318 pp_table->VR0HotGpio = 0;
2319 pp_table->VR0HotPolarity = 0;
2320 pp_table->VR1HotGpio = 0;
2321 pp_table->VR1HotPolarity = 0;
2322 }
2323
dd5a6fe2
TSD
2324 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2325 data->registry_data.ac_dc_switch_gpio_support) {
f83a9991
EH
2326 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2327 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2328 } else {
2329 pp_table->AcDcGpio = 0;
2330 pp_table->AcDcPolarity = 0;
2331 }
2332 }
2333
2334 return result;
2335}
2336
2337static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2338{
690dc626 2339 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2340
2341 if (data->smu_features[GNLD_AVFS].supported) {
2342 if (enable) {
d3f8c0ab 2343 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
2344 true,
2345 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2346 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2347 return -1);
2348 data->smu_features[GNLD_AVFS].enabled = true;
2349 } else {
d3f8c0ab 2350 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2351 false,
de196036 2352 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
f83a9991
EH
2353 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2354 return -1);
2355 data->smu_features[GNLD_AVFS].enabled = false;
2356 }
2357 }
2358
2359 return 0;
2360}
2361
c5a44849
RZ
2362static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2363{
2364 struct vega10_hwmgr *data = hwmgr->backend;
2365
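	/* Keep AVFS off while a VDDC override is pending; restart it after any other DPM table change, otherwise just make sure it is enabled */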
2366 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2367 vega10_avfs_enable(hwmgr, false);
2368 } else if (data->need_update_dpm_table) {
2369 vega10_avfs_enable(hwmgr, false);
2370 vega10_avfs_enable(hwmgr, true);
2371 } else {
2372 vega10_avfs_enable(hwmgr, true);
2373 }
2374
2375 return 0;
2376}
2377
ab5cf3a5
RZ
2378static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2379{
2380 int result = 0;
2381
2382 uint64_t serial_number = 0;
2383 uint32_t top32, bottom32;
2384 struct phm_fuses_default fuse;
2385
690dc626 2386 struct vega10_hwmgr *data = hwmgr->backend;
ab5cf3a5
RZ
2387 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2388
d3f8c0ab 2389 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
3f9ca14a 2390 top32 = smum_get_argument(hwmgr);
ab5cf3a5 2391
d3f8c0ab 2392 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
3f9ca14a 2393 bottom32 = smum_get_argument(hwmgr);
ab5cf3a5
RZ
2394
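	/* Assemble the 64-bit chip serial number from the two 32-bit halves read back from the SMC */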
2395 serial_number = ((uint64_t)bottom32 << 32) | top32;
2396
819c4b94 2397 if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
ab5cf3a5
RZ
2398 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2399 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2400 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2401 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2402 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2403 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2404 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2405 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2406 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
3f9ca14a
RZ
2407 result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
2408 AVFSFUSETABLE, false);
ab5cf3a5
RZ
2409 PP_ASSERT_WITH_CODE(!result,
2410 "Failed to upload FuseOVerride!",
2411 );
2412 }
2413
2414 return result;
2415}
2416
ecfee95a
RZ
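/* Compare the overdrive (ODN) dependency tables against the stock pptable and flag which DPM tables need to be rebuilt */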
2417static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2418{
2419 struct vega10_hwmgr *data = hwmgr->backend;
2420 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2421 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2422 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2423 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2424 uint32_t i;
2425
2426 dep_table = table_info->vdd_dep_on_mclk;
2427 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2428
2429 for (i = 0; i < dep_table->count; i++) {
2430 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2431 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2432 return;
2433 }
2434 }
2435
2436 dep_table = table_info->vdd_dep_on_sclk;
2437 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2438 for (i = 0; i < dep_table->count; i++) {
2439 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2440 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2441 return;
2442 }
2443 }
2444
2445 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2446 data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
2447 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2448 }
2449}
2450
f83a9991
EH
2451/**
2452* Initializes the SMC table and uploads it
2453*
2454* @param hwmgr the address of the powerplay hardware manager.
2456* @return 0 on success, otherwise the error code from the failing populate/upload step.
2457*/
2458static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2459{
2460 int result;
690dc626 2461 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2462 struct phm_ppt_v2_information *table_info =
2463 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2464 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2465 struct pp_atomfwctrl_voltage_table voltage_table;
05ee3215 2466 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
ecfee95a 2467 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
f83a9991
EH
2468
2469 result = vega10_setup_default_dpm_tables(hwmgr);
2470 PP_ASSERT_WITH_CODE(!result,
2471 "Failed to setup default DPM tables!",
2472 return result);
2473
c5a44849 2474 /* initialize ODN table */
ecfee95a
RZ
2475 if (hwmgr->od_enabled) {
2476 if (odn_table->max_vddc) {
2477 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2478 vega10_check_dpm_table_updated(hwmgr);
2479 } else {
2480 vega10_odn_initial_default_setting(hwmgr);
2481 }
2482 }
c5a44849 2483
f83a9991
EH
2484 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2485 VOLTAGE_OBJ_SVID2, &voltage_table);
2486 pp_table->MaxVidStep = voltage_table.max_vid_step;
2487
2488 pp_table->GfxDpmVoltageMode =
2489 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2490 pp_table->SocDpmVoltageMode =
2491 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2492 pp_table->UclkDpmVoltageMode =
2493 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2494 pp_table->UvdDpmVoltageMode =
2495 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2496 pp_table->VceDpmVoltageMode =
2497 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2498 pp_table->Mp0DpmVoltageMode =
2499 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
effa290c 2500
f83a9991
EH
2501 pp_table->DisplayDpmVoltageMode =
2502 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2503
becdaf3f
RZ
2504 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2505 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2506
f83a9991
EH
2507 if (data->registry_data.ulv_support &&
2508 table_info->us_ulv_voltage_offset) {
2509 result = vega10_populate_ulv_state(hwmgr);
2510 PP_ASSERT_WITH_CODE(!result,
2511 "Failed to initialize ULV state!",
2512 return result);
2513 }
2514
2515 result = vega10_populate_smc_link_levels(hwmgr);
2516 PP_ASSERT_WITH_CODE(!result,
2517 "Failed to initialize Link Level!",
2518 return result);
2519
2520 result = vega10_populate_all_graphic_levels(hwmgr);
2521 PP_ASSERT_WITH_CODE(!result,
2522 "Failed to initialize Graphics Level!",
2523 return result);
2524
2525 result = vega10_populate_all_memory_levels(hwmgr);
2526 PP_ASSERT_WITH_CODE(!result,
2527 "Failed to initialize Memory Level!",
2528 return result);
2529
c5a44849
RZ
2530 vega10_populate_vddc_soc_levels(hwmgr);
2531
f83a9991
EH
2532 result = vega10_populate_all_display_clock_levels(hwmgr);
2533 PP_ASSERT_WITH_CODE(!result,
2534 "Failed to initialize Display Level!",
2535 return result);
2536
2537 result = vega10_populate_smc_vce_levels(hwmgr);
2538 PP_ASSERT_WITH_CODE(!result,
2539 "Failed to initialize VCE Level!",
2540 return result);
2541
2542 result = vega10_populate_smc_uvd_levels(hwmgr);
2543 PP_ASSERT_WITH_CODE(!result,
2544 "Failed to initialize UVD Level!",
2545 return result);
2546
afc0255c 2547 if (data->registry_data.clock_stretcher_support) {
f83a9991
EH
2548 result = vega10_populate_clock_stretcher_table(hwmgr);
2549 PP_ASSERT_WITH_CODE(!result,
2550 "Failed to populate Clock Stretcher Table!",
2551 return result);
2552 }
2553
05ee3215
RZ
2554 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2555 if (!result) {
2556 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2557 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2558 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2559 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2560 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
f73f9e35
RZ
2561 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2562 SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2563
2564 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2565 SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2566
05ee3215 2567 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
c5b053d2 2568 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
05ee3215 2569 if (0 != boot_up_values.usVddc) {
d3f8c0ab 2570 smum_send_msg_to_smc_with_parameter(hwmgr,
05ee3215
RZ
2571 PPSMC_MSG_SetFloorSocVoltage,
2572 (boot_up_values.usVddc * 4));
2573 data->vbios_boot_state.bsoc_vddc_lock = true;
2574 } else {
2575 data->vbios_boot_state.bsoc_vddc_lock = false;
2576 }
d3f8c0ab 2577 smum_send_msg_to_smc_with_parameter(hwmgr,
c5b053d2
RZ
2578 PPSMC_MSG_SetMinDeepSleepDcefclk,
2579 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
05ee3215
RZ
2580 }
2581
f83a9991
EH
2582 result = vega10_populate_avfs_parameters(hwmgr);
2583 PP_ASSERT_WITH_CODE(!result,
2584 "Failed to initialize AVFS Parameters!",
2585 return result);
2586
2587 result = vega10_populate_gpio_parameters(hwmgr);
2588 PP_ASSERT_WITH_CODE(!result,
2589 "Failed to initialize GPIO Parameters!",
2590 return result);
2591
2592 pp_table->GfxclkAverageAlpha = (uint8_t)
2593 (data->gfxclk_average_alpha);
2594 pp_table->SocclkAverageAlpha = (uint8_t)
2595 (data->socclk_average_alpha);
2596 pp_table->UclkAverageAlpha = (uint8_t)
2597 (data->uclk_average_alpha);
2598 pp_table->GfxActivityAverageAlpha = (uint8_t)
2599 (data->gfx_activity_average_alpha);
2600
ab5cf3a5
RZ
2601 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2602
3f9ca14a
RZ
2603 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2604
f83a9991
EH
2605 PP_ASSERT_WITH_CODE(!result,
2606 "Failed to upload PPtable!", return result);
2607
2211a787
RZ
2608 result = vega10_avfs_enable(hwmgr, true);
2609 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
f83a9991 2610 return result);
bdb8cd10 2611 vega10_acg_enable(hwmgr);
d6c025d2 2612
f83a9991
EH
2613 return 0;
2614}
2615
2616static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2617{
690dc626 2618 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2619
2620 if (data->smu_features[GNLD_THERMAL].supported) {
2621 if (data->smu_features[GNLD_THERMAL].enabled)
2622 			pr_info("THERMAL Feature Already enabled!\n");
2623
2624 PP_ASSERT_WITH_CODE(
d3f8c0ab 2625 !vega10_enable_smc_features(hwmgr,
f83a9991
EH
2626 true,
2627 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2628 "Enable THERMAL Feature Failed!",
2629 return -1);
2630 data->smu_features[GNLD_THERMAL].enabled = true;
2631 }
2632
2633 return 0;
2634}
2635
8b9242ed
RZ
2636static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2637{
690dc626 2638 struct vega10_hwmgr *data = hwmgr->backend;
8b9242ed
RZ
2639
2640 if (data->smu_features[GNLD_THERMAL].supported) {
2641 if (!data->smu_features[GNLD_THERMAL].enabled)
2642 			pr_info("THERMAL Feature Already disabled!\n");
2643
2644 PP_ASSERT_WITH_CODE(
d3f8c0ab 2645 !vega10_enable_smc_features(hwmgr,
8b9242ed
RZ
2646 false,
2647 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2648 "disable THERMAL Feature Failed!",
2649 return -1);
2650 data->smu_features[GNLD_THERMAL].enabled = false;
2651 }
2652
2653 return 0;
2654}
2655
f83a9991
EH
2656static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2657{
690dc626 2658 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 2659
dd5a6fe2 2660 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
f83a9991
EH
2661 if (data->smu_features[GNLD_VR0HOT].supported) {
2662 PP_ASSERT_WITH_CODE(
d3f8c0ab 2663 !vega10_enable_smc_features(hwmgr,
f83a9991
EH
2664 true,
2665 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2666 "Attempt to Enable VR0 Hot feature Failed!",
2667 return -1);
2668 data->smu_features[GNLD_VR0HOT].enabled = true;
2669 } else {
2670 if (data->smu_features[GNLD_VR1HOT].supported) {
2671 PP_ASSERT_WITH_CODE(
d3f8c0ab 2672 !vega10_enable_smc_features(hwmgr,
f83a9991
EH
2673 true,
2674 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2675 "Attempt to Enable VR0 Hot feature Failed!",
2676 return -1);
2677 data->smu_features[GNLD_VR1HOT].enabled = true;
2678 }
2679 }
2680 }
2681 return 0;
2682}
2683
2684static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2685{
690dc626 2686 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2687
2688 if (data->registry_data.ulv_support) {
d3f8c0ab 2689 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
2690 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2691 "Enable ULV Feature Failed!",
2692 return -1);
2693 data->smu_features[GNLD_ULV].enabled = true;
2694 }
2695
2696 return 0;
2697}
2698
4022e4f2
RZ
2699static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2700{
690dc626 2701 struct vega10_hwmgr *data = hwmgr->backend;
4022e4f2
RZ
2702
2703 if (data->registry_data.ulv_support) {
d3f8c0ab 2704 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4022e4f2
RZ
2705 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2706 "disable ULV Feature Failed!",
2707 return -EINVAL);
2708 data->smu_features[GNLD_ULV].enabled = false;
2709 }
2710
2711 return 0;
2712}
2713
f83a9991
EH
2714static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2715{
690dc626 2716 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2717
2718 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2719 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
2720 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2721 "Attempt to Enable DS_GFXCLK Feature Failed!",
df057e02 2722 return -EINVAL);
f83a9991
EH
2723 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2724 }
2725
2726 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2727 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2728 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
df057e02
RZ
2729 "Attempt to Enable DS_SOCCLK Feature Failed!",
2730 return -EINVAL);
f83a9991
EH
2731 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2732 }
2733
2734 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2735 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2736 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
df057e02
RZ
2737 "Attempt to Enable DS_LCLK Feature Failed!",
2738 return -EINVAL);
f83a9991
EH
2739 data->smu_features[GNLD_DS_LCLK].enabled = true;
2740 }
2741
df057e02 2742 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2743 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2744 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2745 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2746 return -EINVAL);
2747 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2748 }
2749
2750 return 0;
2751}
2752
2753static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2754{
690dc626 2755 struct vega10_hwmgr *data = hwmgr->backend;
df057e02
RZ
2756
2757 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2758 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2759 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2760 "Attempt to disable DS_GFXCLK Feature Failed!",
2761 return -EINVAL);
2762 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2763 }
2764
2765 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2766 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2767 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2768 "Attempt to disable DS_ Feature Failed!",
2769 return -EINVAL);
2770 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2771 }
2772
2773 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2774 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2775 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2776 "Attempt to disable DS_LCLK Feature Failed!",
2777 return -EINVAL);
2778 data->smu_features[GNLD_DS_LCLK].enabled = false;
2779 }
2780
2781 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2782 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2783 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2784 "Attempt to disable DS_DCEFCLK Feature Failed!",
2785 return -EINVAL);
2786 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2787 }
2788
f83a9991
EH
2789 return 0;
2790}
2791
8b9242ed
RZ
2792static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2793{
690dc626 2794 struct vega10_hwmgr *data = hwmgr->backend;
8b9242ed
RZ
2795 uint32_t i, feature_mask = 0;
2796
2797
2798 	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
d3f8c0ab 2799 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f06fed92
RZ
2800 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2801 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2802 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
8b9242ed
RZ
2803 }
2804
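	/* Collect every enabled DPM feature selected by the bitmap so they can be disabled with a single SMC request */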
2805 for (i = 0; i < GNLD_DPM_MAX; i++) {
2806 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2807 if (data->smu_features[i].supported) {
2808 if (data->smu_features[i].enabled) {
2809 feature_mask |= data->smu_features[i].
2810 smu_feature_bitmap;
2811 data->smu_features[i].enabled = false;
2812 }
2813 }
2814 }
2815 }
2816
d3f8c0ab 2817 vega10_enable_smc_features(hwmgr, false, feature_mask);
8b9242ed
RZ
2818
2819 return 0;
2820}
2821
f83a9991
EH
2822/**
2823 * @brief Tell SMC to enable the supported DPMs.
2824 *
2825 * @param hwmgr - the address of the powerplay hardware manager.
2826 * @param bitmap - bitmap of the features to enable.
2827 * @return 0 if at least one DPM is successfully enabled.
2828 */
2829static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2830{
690dc626 2831 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2832 uint32_t i, feature_mask = 0;
2833
2834 for (i = 0; i < GNLD_DPM_MAX; i++) {
2835 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2836 if (data->smu_features[i].supported) {
2837 if (!data->smu_features[i].enabled) {
2838 feature_mask |= data->smu_features[i].
2839 smu_feature_bitmap;
2840 data->smu_features[i].enabled = true;
2841 }
2842 }
2843 }
2844 }
2845
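	/* If the SMC rejects the request, roll back the enabled flags for the features just marked */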
d3f8c0ab 2846 if (vega10_enable_smc_features(hwmgr,
f83a9991
EH
2847 true, feature_mask)) {
2848 for (i = 0; i < GNLD_DPM_MAX; i++) {
2849 if (data->smu_features[i].smu_feature_bitmap &
2850 feature_mask)
2851 data->smu_features[i].enabled = false;
2852 }
2853 }
2854
2855 	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
d3f8c0ab 2856 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
2857 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2858 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2859 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2860 }
2861
05ee3215 2862 if (data->vbios_boot_state.bsoc_vddc_lock) {
d3f8c0ab 2863 smum_send_msg_to_smc_with_parameter(hwmgr,
05ee3215
RZ
2864 PPSMC_MSG_SetFloorSocVoltage, 0);
2865 data->vbios_boot_state.bsoc_vddc_lock = false;
2866 }
2867
dd5a6fe2 2868 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
f83a9991 2869 if (data->smu_features[GNLD_ACDC].supported) {
d3f8c0ab 2870 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
2871 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2872 "Attempt to Enable DS_GFXCLK Feature Failed!",
2873 return -1);
2874 data->smu_features[GNLD_ACDC].enabled = true;
2875 }
2876 }
2877
2878 return 0;
2879}
2880
15826fbf
RZ
2881static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2882{
690dc626 2883 struct vega10_hwmgr *data = hwmgr->backend;
15826fbf
RZ
2884
2885 if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2886 if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2887 			pr_info("GNLD_PCC_LIMIT is already %s\n", enable ? "enabled" : "disabled");
2888 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2889 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2890 "Attempt to Enable PCC Limit feature Failed!",
2891 return -EINVAL);
2892 data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2893 }
2894
2895 return 0;
2896}
2897
f83a9991
EH
2898static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2899{
690dc626 2900 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2901 int tmp_result, result = 0;
2902
15826fbf
RZ
2903 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2904
e21148ec
RZ
2905 smum_send_msg_to_smc_with_parameter(hwmgr,
2906 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2907
f83a9991
EH
2908 tmp_result = vega10_construct_voltage_tables(hwmgr);
2909 PP_ASSERT_WITH_CODE(!tmp_result,
3d3c4f1b 2910 "Failed to construct voltage tables!",
f83a9991
EH
2911 result = tmp_result);
2912
2913 tmp_result = vega10_init_smc_table(hwmgr);
2914 PP_ASSERT_WITH_CODE(!tmp_result,
2915 "Failed to initialize SMC table!",
2916 result = tmp_result);
2917
dd5a6fe2 2918 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
f83a9991
EH
2919 tmp_result = vega10_enable_thermal_protection(hwmgr);
2920 PP_ASSERT_WITH_CODE(!tmp_result,
2921 "Failed to enable thermal protection!",
2922 result = tmp_result);
2923 }
2924
2925 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2926 PP_ASSERT_WITH_CODE(!tmp_result,
2927 "Failed to enable VR hot feature!",
2928 result = tmp_result);
2929
f83a9991
EH
2930 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2931 PP_ASSERT_WITH_CODE(!tmp_result,
2932 "Failed to enable deep sleep master switch!",
2933 result = tmp_result);
2934
2935 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2936 PP_ASSERT_WITH_CODE(!tmp_result,
2937 "Failed to start DPM!", result = tmp_result);
2938
9b7b8154
EQ
2939 /* enable didt, do not abort if failed didt */
2940 tmp_result = vega10_enable_didt_config(hwmgr);
2941 PP_ASSERT(!tmp_result,
2942 "Failed to enable didt config!");
2943
f83a9991
EH
2944 tmp_result = vega10_enable_power_containment(hwmgr);
2945 PP_ASSERT_WITH_CODE(!tmp_result,
2946 "Failed to enable power containment!",
2947 result = tmp_result);
2948
2949 tmp_result = vega10_power_control_set_level(hwmgr);
2950 PP_ASSERT_WITH_CODE(!tmp_result,
2951 "Failed to power control set level!",
2952 result = tmp_result);
2953
4022e4f2
RZ
2954 tmp_result = vega10_enable_ulv(hwmgr);
2955 PP_ASSERT_WITH_CODE(!tmp_result,
2956 "Failed to enable ULV!",
2957 result = tmp_result);
2958
f83a9991
EH
2959 return result;
2960}
2961
2962static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2963{
2964 return sizeof(struct vega10_power_state);
2965}
2966
2967static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2968 void *state, struct pp_power_state *power_state,
2969 void *pp_table, uint32_t classification_flag)
2970{
ebc1c9c1 2971 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
f83a9991
EH
2972 struct vega10_power_state *vega10_power_state =
2973 cast_phw_vega10_power_state(&(power_state->hardware));
2974 struct vega10_performance_level *performance_level;
2975 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2976 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2977 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2978 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2979 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2980 (((unsigned long)powerplay_table) +
2981 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2982 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2983 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2984 (((unsigned long)powerplay_table) +
2985 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2986 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2987 (ATOM_Vega10_MCLK_Dependency_Table *)
2988 (((unsigned long)powerplay_table) +
2989 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2990
2991
2992 /* The following fields are not initialized here:
2993 * id orderedList allStatesList
2994 */
2995 power_state->classification.ui_label =
2996 (le16_to_cpu(state_entry->usClassification) &
2997 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2998 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2999 power_state->classification.flags = classification_flag;
3000 /* NOTE: There is a classification2 flag in BIOS
3001 * that is not being used right now
3002 */
3003 power_state->classification.temporary_state = false;
3004 power_state->classification.to_be_deleted = false;
3005
3006 power_state->validation.disallowOnDC =
3007 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3008 ATOM_Vega10_DISALLOW_ON_DC) != 0);
3009
3010 power_state->display.disableFrameModulation = false;
3011 power_state->display.limitRefreshrate = false;
3012 power_state->display.enableVariBright =
3013 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3014 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3015
3016 power_state->validation.supportedPowerLevels = 0;
3017 power_state->uvd_clocks.VCLK = 0;
3018 power_state->uvd_clocks.DCLK = 0;
3019 power_state->temperatures.min = 0;
3020 power_state->temperatures.max = 0;
3021
3022 performance_level = &(vega10_power_state->performance_levels
3023 [vega10_power_state->performance_level_count++]);
3024
3025 PP_ASSERT_WITH_CODE(
3026 (vega10_power_state->performance_level_count <
3027 NUM_GFXCLK_DPM_LEVELS),
3028 "Performance levels exceeds SMC limit!",
3029 return -1);
3030
3031 PP_ASSERT_WITH_CODE(
3032 (vega10_power_state->performance_level_count <=
3033 hwmgr->platform_descriptor.
3034 hardwareActivityPerformanceLevels),
3035 "Performance levels exceeds Driver limit!",
3036 return -1);
3037
3038 /* Performance levels are arranged from low to high. */
3039 performance_level->soc_clock = socclk_dep_table->entries
3040 [state_entry->ucSocClockIndexLow].ulClk;
3041 performance_level->gfx_clock = gfxclk_dep_table->entries
3042 [state_entry->ucGfxClockIndexLow].ulClk;
3043 performance_level->mem_clock = mclk_dep_table->entries
3044 [state_entry->ucMemClockIndexLow].ulMemClk;
3045
3046 performance_level = &(vega10_power_state->performance_levels
3047 [vega10_power_state->performance_level_count++]);
f83a9991 3048 performance_level->soc_clock = socclk_dep_table->entries
ebc1c9c1
RZ
3049 [state_entry->ucSocClockIndexHigh].ulClk;
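	/* ucRevId selects between the original and the V2 GFXCLK dependency record layouts */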
3050 if (gfxclk_dep_table->ucRevId == 0) {
3051 performance_level->gfx_clock = gfxclk_dep_table->entries
f83a9991 3052 [state_entry->ucGfxClockIndexHigh].ulClk;
ebc1c9c1
RZ
3053 } else if (gfxclk_dep_table->ucRevId == 1) {
3054 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3055 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3056 }
3057
f83a9991
EH
3058 performance_level->mem_clock = mclk_dep_table->entries
3059 [state_entry->ucMemClockIndexHigh].ulMemClk;
3060 return 0;
3061}
3062
3063static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3064 unsigned long entry_index, struct pp_power_state *state)
3065{
3066 int result;
3067 struct vega10_power_state *ps;
3068
3069 state->hardware.magic = PhwVega10_Magic;
3070
3071 ps = cast_phw_vega10_power_state(&state->hardware);
3072
3073 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3074 vega10_get_pp_table_entry_callback_func);
3075
3076 /*
 3077          * This is the earliest time we have all the dependency tables
 3078          * and the VBIOS boot state available
3079 */
3080 /* set DC compatible flag if this state supports DC */
3081 if (!state->validation.disallowOnDC)
3082 ps->dc_compatible = true;
3083
3084 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3085 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3086
3087 return 0;
3088}
3089
3090static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3091 struct pp_hw_power_state *hw_ps)
3092{
3093 return 0;
3094}
3095
3096static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3097 struct pp_power_state *request_ps,
3098 const struct pp_power_state *current_ps)
3099{
600ae890 3100 struct amdgpu_device *adev = hwmgr->adev;
f83a9991
EH
3101 struct vega10_power_state *vega10_ps =
3102 cast_phw_vega10_power_state(&request_ps->hardware);
3103 uint32_t sclk;
3104 uint32_t mclk;
3105 struct PP_Clocks minimum_clocks = {0};
3106 bool disable_mclk_switching;
3107 bool disable_mclk_switching_for_frame_lock;
3108 bool disable_mclk_switching_for_vr;
3109 bool force_mclk_high;
f83a9991
EH
3110 const struct phm_clock_and_voltage_limits *max_limits;
3111 uint32_t i;
690dc626 3112 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3113 struct phm_ppt_v2_information *table_info =
3114 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3115 int32_t count;
3116 uint32_t stable_pstate_sclk_dpm_percentage;
3117 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3118 uint32_t latency;
3119
3120 data->battery_state = (PP_StateUILabel_Battery ==
3121 request_ps->classification.ui_label);
3122
3123 if (vega10_ps->performance_level_count != 2)
 3124                 pr_info("Vega10 should always have 2 performance levels");
3125
600ae890 3126 max_limits = adev->pm.ac_power ?
f83a9991
EH
3127 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3128 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3129
3130 /* Cap clock DPM tables at DC MAX if it is in DC. */
600ae890 3131 if (!adev->pm.ac_power) {
f83a9991
EH
3132 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3133 if (vega10_ps->performance_levels[i].mem_clock >
3134 max_limits->mclk)
3135 vega10_ps->performance_levels[i].mem_clock =
3136 max_limits->mclk;
3137 if (vega10_ps->performance_levels[i].gfx_clock >
3138 max_limits->sclk)
3139 vega10_ps->performance_levels[i].gfx_clock =
3140 max_limits->sclk;
3141 }
3142 }
3143
f83a9991 3144 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
555fd70c
RZ
3145 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3146 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991 3147
dd5a6fe2 3148 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
0d1da3c1
NI
3149 stable_pstate_sclk_dpm_percentage =
3150 data->registry_data.stable_pstate_sclk_dpm_percentage;
f83a9991
EH
3151 PP_ASSERT_WITH_CODE(
3152 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3153 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3154 "percent sclk value must range from 1% to 100%, setting default value",
3155 stable_pstate_sclk_dpm_percentage = 75);
3156
3157 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3158 stable_pstate_sclk = (max_limits->sclk *
3159 stable_pstate_sclk_dpm_percentage) / 100;
3160
3161 for (count = table_info->vdd_dep_on_sclk->count - 1;
3162 count >= 0; count--) {
3163 if (stable_pstate_sclk >=
3164 table_info->vdd_dep_on_sclk->entries[count].clk) {
3165 stable_pstate_sclk =
3166 table_info->vdd_dep_on_sclk->entries[count].clk;
3167 break;
3168 }
3169 }
3170
3171 if (count < 0)
3172 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3173
3174 stable_pstate_mclk = max_limits->mclk;
3175
3176 minimum_clocks.engineClock = stable_pstate_sclk;
3177 minimum_clocks.memoryClock = stable_pstate_mclk;
3178 }
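	/* At this point, when PHM_PlatformCaps_StablePState is set, the minimum
	 * engine clock has been pinned to stable_pstate_sclk_dpm_percentage
	 * (falling back to 75% if the registry value is out of range) of the AC
	 * SCLK limit, snapped down to the nearest SCLK dependency-table entry
	 * (or the lowest entry if none lies below it), and the minimum memory
	 * clock has been pinned to the AC MCLK limit. */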
3179
6ce2d46c
AD
3180 disable_mclk_switching_for_frame_lock =
3181 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3182 disable_mclk_switching_for_vr =
3183 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
dd5a6fe2 3184 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
f83a9991 3185
555fd70c 3186 if (hwmgr->display_config->num_display == 0)
d6bca7e7
AD
3187 disable_mclk_switching = false;
3188 else
555fd70c 3189 disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
d6bca7e7
AD
3190 disable_mclk_switching_for_frame_lock ||
3191 disable_mclk_switching_for_vr ||
3192 force_mclk_high;
f83a9991
EH
3193
3194 sclk = vega10_ps->performance_levels[0].gfx_clock;
3195 mclk = vega10_ps->performance_levels[0].mem_clock;
3196
3197 if (sclk < minimum_clocks.engineClock)
3198 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3199 max_limits->sclk : minimum_clocks.engineClock;
3200
3201 if (mclk < minimum_clocks.memoryClock)
3202 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3203 max_limits->mclk : minimum_clocks.memoryClock;
3204
3205 vega10_ps->performance_levels[0].gfx_clock = sclk;
3206 vega10_ps->performance_levels[0].mem_clock = mclk;
3207
d0856f3a
RZ
3208 if (vega10_ps->performance_levels[1].gfx_clock <
3209 vega10_ps->performance_levels[0].gfx_clock)
3210 vega10_ps->performance_levels[0].gfx_clock =
3211 vega10_ps->performance_levels[1].gfx_clock;
f83a9991
EH
3212
3213 if (disable_mclk_switching) {
3214 /* Set Mclk the max of level 0 and level 1 */
3215 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3216 mclk = vega10_ps->performance_levels[1].mem_clock;
3217
3218 /* Find the lowest MCLK frequency that is within
3219 * the tolerable latency defined in DAL
3220 */
7d8d968d 3221 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
f83a9991
EH
3222 for (i = 0; i < data->mclk_latency_table.count; i++) {
3223 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3224 (data->mclk_latency_table.entries[i].frequency >=
3225 vega10_ps->performance_levels[0].mem_clock) &&
3226 (data->mclk_latency_table.entries[i].frequency <=
3227 vega10_ps->performance_levels[1].mem_clock))
3228 mclk = data->mclk_latency_table.entries[i].frequency;
3229 }
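		/* Each qualifying entry overwrites mclk, so what survives the scan
		 * is the last latency-table entry that meets the DAL tolerance and
		 * lies between the level-0 and level-1 memory clocks (the highest
		 * such frequency if the table is sorted ascending); if none
		 * qualifies, the value chosen above is kept. */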
3230 vega10_ps->performance_levels[0].mem_clock = mclk;
3231 } else {
3232 if (vega10_ps->performance_levels[1].mem_clock <
3233 vega10_ps->performance_levels[0].mem_clock)
d0856f3a
RZ
3234 vega10_ps->performance_levels[0].mem_clock =
3235 vega10_ps->performance_levels[1].mem_clock;
f83a9991
EH
3236 }
3237
dd5a6fe2 3238 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
f83a9991
EH
3239 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3240 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3241 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3242 }
3243 }
3244
3245 return 0;
3246}
3247
3248static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3249{
690dc626 3250 struct vega10_hwmgr *data = hwmgr->backend;
47fdd897
RZ
3251 const struct phm_set_power_state_input *states =
3252 (const struct phm_set_power_state_input *)input;
3253 const struct vega10_power_state *vega10_ps =
3254 cast_const_phw_vega10_power_state(states->pnew_state);
3255 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
3256 uint32_t sclk = vega10_ps->performance_levels
3257 [vega10_ps->performance_level_count - 1].gfx_clock;
3258 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
3259 uint32_t mclk = vega10_ps->performance_levels
3260 [vega10_ps->performance_level_count - 1].mem_clock;
3261 uint32_t i;
3262
3263 for (i = 0; i < sclk_table->count; i++) {
3264 if (sclk == sclk_table->dpm_levels[i].value)
3265 break;
3266 }
3267
3268 if (i >= sclk_table->count) {
3269 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3270 sclk_table->dpm_levels[i-1].value = sclk;
3271 }
3272
3273 for (i = 0; i < mclk_table->count; i++) {
3274 if (mclk == mclk_table->dpm_levels[i].value)
3275 break;
3276 }
3277
3278 if (i >= mclk_table->count) {
3279 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3280 mclk_table->dpm_levels[i-1].value = mclk;
3281 }
f83a9991 3282
c5a44849
RZ
3283 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3284 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
f83a9991 3285
f83a9991
EH
3286 return 0;
3287}
3288
3289static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3290 struct pp_hwmgr *hwmgr, const void *input)
3291{
3292 int result = 0;
690dc626 3293 struct vega10_hwmgr *data = hwmgr->backend;
a0c3bf0f
RZ
3294 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3295 struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3296 struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3297 int count;
f83a9991 3298
c5a44849
RZ
3299 if (!data->need_update_dpm_table)
3300 return 0;
f83a9991 3301
a0c3bf0f
RZ
3302 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3303 for (count = 0; count < dpm_table->gfx_table.count; count++)
3304 dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3305 }
3306
3307 odn_clk_table = &odn_table->vdd_dep_on_mclk;
3308 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3309 for (count = 0; count < dpm_table->mem_table.count; count++)
3310 dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3311 }
3312
c5a44849
RZ
3313 if (data->need_update_dpm_table &
3314 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3315 result = vega10_populate_all_graphic_levels(hwmgr);
3316 PP_ASSERT_WITH_CODE((0 == result),
3317 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3318 return result);
3319 }
f83a9991 3320
c5a44849
RZ
3321 if (data->need_update_dpm_table &
3322 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3323 result = vega10_populate_all_memory_levels(hwmgr);
3324 PP_ASSERT_WITH_CODE((0 == result),
3325 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3326 return result);
3327 }
f83a9991 3328
c5a44849 3329 vega10_populate_vddc_soc_levels(hwmgr);
f83a9991 3330
f83a9991
EH
3331 return result;
3332}
3333
3334static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3335 struct vega10_single_dpm_table *dpm_table,
3336 uint32_t low_limit, uint32_t high_limit)
3337{
3338 uint32_t i;
3339
3340 for (i = 0; i < dpm_table->count; i++) {
3341 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3342 (dpm_table->dpm_levels[i].value > high_limit))
3343 dpm_table->dpm_levels[i].enabled = false;
3344 else
3345 dpm_table->dpm_levels[i].enabled = true;
3346 }
3347 return 0;
3348}
3349
3350static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3351 struct vega10_single_dpm_table *dpm_table,
3352 uint32_t low_limit, uint32_t high_limit,
3353 uint32_t disable_dpm_mask)
3354{
3355 uint32_t i;
3356
3357 for (i = 0; i < dpm_table->count; i++) {
3358 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3359 (dpm_table->dpm_levels[i].value > high_limit))
3360 dpm_table->dpm_levels[i].enabled = false;
3361 else if (!((1 << i) & disable_dpm_mask))
3362 dpm_table->dpm_levels[i].enabled = false;
3363 else
3364 dpm_table->dpm_levels[i].enabled = true;
3365 }
3366 return 0;
3367}
3368
3369static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3370 const struct vega10_power_state *vega10_ps)
3371{
690dc626 3372 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3373 uint32_t high_limit_count;
3374
3375 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3376 "power state did not have any performance level",
3377 return -1);
3378
3379 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3380
3381 vega10_trim_single_dpm_states(hwmgr,
3382 &(data->dpm_table.soc_table),
3383 vega10_ps->performance_levels[0].soc_clock,
3384 vega10_ps->performance_levels[high_limit_count].soc_clock);
3385
3386 vega10_trim_single_dpm_states_with_mask(hwmgr,
3387 &(data->dpm_table.gfx_table),
3388 vega10_ps->performance_levels[0].gfx_clock,
3389 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3390 data->disable_dpm_mask);
3391
3392 vega10_trim_single_dpm_states(hwmgr,
3393 &(data->dpm_table.mem_table),
3394 vega10_ps->performance_levels[0].mem_clock,
3395 vega10_ps->performance_levels[high_limit_count].mem_clock);
3396
3397 return 0;
3398}
3399
3400static uint32_t vega10_find_lowest_dpm_level(
3401 struct vega10_single_dpm_table *table)
3402{
3403 uint32_t i;
3404
3405 for (i = 0; i < table->count; i++) {
3406 if (table->dpm_levels[i].enabled)
3407 break;
3408 }
3409
3410 return i;
3411}
3412
3413static uint32_t vega10_find_highest_dpm_level(
3414 struct vega10_single_dpm_table *table)
3415{
3416 uint32_t i = 0;
3417
3418 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3419 for (i = table->count; i > 0; i--) {
3420 if (table->dpm_levels[i - 1].enabled)
3421 return i - 1;
3422 }
3423 } else {
3424 pr_info("DPM Table Has Too Many Entries!");
3425 return MAX_REGULAR_DPM_NUMBER - 1;
3426 }
3427
3428 return i;
3429}
3430
3431static void vega10_apply_dal_minimum_voltage_request(
3432 struct pp_hwmgr *hwmgr)
3433{
3434 return;
3435}
3436
3d4d4fd0
RZ
3437static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3438{
3439 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3440 struct phm_ppt_v2_information *table_info =
3441 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3442
3443 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3444
3445 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3446}
3447
f83a9991
EH
3448static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3449{
690dc626 3450 struct vega10_hwmgr *data = hwmgr->backend;
3d4d4fd0 3451 uint32_t socclk_idx;
f83a9991
EH
3452
3453 vega10_apply_dal_minimum_voltage_request(hwmgr);
3454
3455 if (!data->registry_data.sclk_dpm_key_disabled) {
3456 if (data->smc_state_table.gfx_boot_level !=
3457 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
d246cd53 3458 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3459 PPSMC_MSG_SetSoftMinGfxclkByIndex,
d246cd53 3460 data->smc_state_table.gfx_boot_level);
f83a9991
EH
3461 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3462 data->smc_state_table.gfx_boot_level;
3463 }
3464 }
3465
3466 if (!data->registry_data.mclk_dpm_key_disabled) {
3467 if (data->smc_state_table.mem_boot_level !=
3468 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3d4d4fd0
RZ
3469 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3470 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
d246cd53 3471 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3472 PPSMC_MSG_SetSoftMinSocclkByIndex,
d246cd53 3473 socclk_idx);
3d4d4fd0 3474 } else {
d246cd53 3475 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3476 PPSMC_MSG_SetSoftMinUclkByIndex,
d246cd53 3477 data->smc_state_table.mem_boot_level);
3d4d4fd0 3478 }
f83a9991
EH
3479 data->dpm_table.mem_table.dpm_state.soft_min_level =
3480 data->smc_state_table.mem_boot_level;
3481 }
3482 }
3483
3484 return 0;
3485}
3486
3487static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3488{
690dc626 3489 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3490
3491 vega10_apply_dal_minimum_voltage_request(hwmgr);
3492
3493 if (!data->registry_data.sclk_dpm_key_disabled) {
3494 if (data->smc_state_table.gfx_max_level !=
d246cd53
RZ
3495 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3496 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3497 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
d246cd53 3498 data->smc_state_table.gfx_max_level);
f83a9991
EH
3499 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3500 data->smc_state_table.gfx_max_level;
3501 }
3502 }
3503
3504 if (!data->registry_data.mclk_dpm_key_disabled) {
3505 if (data->smc_state_table.mem_max_level !=
d246cd53
RZ
3506 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3507 smum_send_msg_to_smc_with_parameter(hwmgr,
3508 PPSMC_MSG_SetSoftMaxUclkByIndex,
3509 data->smc_state_table.mem_max_level);
f83a9991
EH
3510 data->dpm_table.mem_table.dpm_state.soft_max_level =
3511 data->smc_state_table.mem_max_level;
3512 }
3513 }
3514
3515 return 0;
3516}
3517
3518static int vega10_generate_dpm_level_enable_mask(
3519 struct pp_hwmgr *hwmgr, const void *input)
3520{
690dc626 3521 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3522 const struct phm_set_power_state_input *states =
3523 (const struct phm_set_power_state_input *)input;
3524 const struct vega10_power_state *vega10_ps =
3525 cast_const_phw_vega10_power_state(states->pnew_state);
3526 int i;
3527
3528 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3529 "Attempt to Trim DPM States Failed!",
3530 return -1);
3531
3532 data->smc_state_table.gfx_boot_level =
3533 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3534 data->smc_state_table.gfx_max_level =
3535 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3536 data->smc_state_table.mem_boot_level =
3537 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3538 data->smc_state_table.mem_max_level =
3539 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3540
3541 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3542 "Attempt to upload DPM Bootup Levels Failed!",
3543 return -1);
3544 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3545 "Attempt to upload DPM Max Levels Failed!",
3546 return -1);
 3547         for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3548 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3549
3550
 3551         for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3552 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3553
3554 return 0;
3555}
3556
3557int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3558{
690dc626 3559 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3560
3561 if (data->smu_features[GNLD_DPM_VCE].supported) {
d3f8c0ab 3562 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
3563 enable,
3564 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3565 "Attempt to Enable/Disable DPM VCE Failed!",
3566 return -1);
3567 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3568 }
3569
3570 return 0;
3571}
3572
3573static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3574{
690dc626 3575 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3576 uint32_t low_sclk_interrupt_threshold = 0;
3577
dd5a6fe2 3578 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
29411f05 3579 (data->low_sclk_interrupt_threshold != 0)) {
f83a9991
EH
3580 low_sclk_interrupt_threshold =
3581 data->low_sclk_interrupt_threshold;
3582
3583 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3584 cpu_to_le32(low_sclk_interrupt_threshold);
3585
3586 /* This message will also enable SmcToHost Interrupt */
d246cd53 3587 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3588 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3589 (uint32_t)low_sclk_interrupt_threshold);
3590 }
3591
d246cd53 3592 return 0;
f83a9991
EH
3593}
3594
3595static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3596 const void *input)
3597{
3598 int tmp_result, result = 0;
690dc626 3599 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3600 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3601
3602 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3603 PP_ASSERT_WITH_CODE(!tmp_result,
3604 "Failed to find DPM states clocks in DPM table!",
3605 result = tmp_result);
3606
3607 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3608 PP_ASSERT_WITH_CODE(!tmp_result,
3609 "Failed to populate and upload SCLK MCLK DPM levels!",
3610 result = tmp_result);
3611
3612 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3613 PP_ASSERT_WITH_CODE(!tmp_result,
3614 "Failed to generate DPM level enabled mask!",
3615 result = tmp_result);
3616
3617 tmp_result = vega10_update_sclk_threshold(hwmgr);
3618 PP_ASSERT_WITH_CODE(!tmp_result,
3619 "Failed to update SCLK threshold!",
3620 result = tmp_result);
3621
3f9ca14a 3622 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
f83a9991
EH
3623 PP_ASSERT_WITH_CODE(!result,
3624 "Failed to upload PPtable!", return result);
3625
c5a44849
RZ
3626 vega10_update_avfs(hwmgr);
3627
3628 data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
f83a9991
EH
3629
3630 return 0;
3631}
3632
f93f0c3a 3633static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3634{
3635 struct pp_power_state *ps;
3636 struct vega10_power_state *vega10_ps;
3637
3638 if (hwmgr == NULL)
3639 return -EINVAL;
3640
3641 ps = hwmgr->request_ps;
3642
3643 if (ps == NULL)
3644 return -EINVAL;
3645
3646 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3647
3648 if (low)
3649 return vega10_ps->performance_levels[0].gfx_clock;
3650 else
3651 return vega10_ps->performance_levels
3652 [vega10_ps->performance_level_count - 1].gfx_clock;
3653}
3654
f93f0c3a 3655static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3656{
3657 struct pp_power_state *ps;
3658 struct vega10_power_state *vega10_ps;
3659
3660 if (hwmgr == NULL)
3661 return -EINVAL;
3662
3663 ps = hwmgr->request_ps;
3664
3665 if (ps == NULL)
3666 return -EINVAL;
3667
3668 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3669
3670 if (low)
3671 return vega10_ps->performance_levels[0].mem_clock;
3672 else
3673 return vega10_ps->performance_levels
3674 [vega10_ps->performance_level_count-1].mem_clock;
3675}
3676
17d176a5 3677static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
5b79d048 3678 uint32_t *query)
17d176a5 3679{
6b5defd6
EH
3680 uint32_t value;
3681
5b79d048
RZ
3682 if (!query)
3683 return -EINVAL;
3684
d246cd53 3685 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3f9ca14a 3686 value = smum_get_argument(hwmgr);
fda519fb 3687
5b79d048
RZ
 3688         /* The SMC returns actual watts; to stay consistent with legacy ASICs, the low 8 bits are used as 8 fractional bits */
3689 *query = value << 8;
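	/* For example, an SMC reading of 42 W becomes 42 << 8 = 10752, i.e. 42.0
	 * in a 24.8 fixed-point representation. */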
6b5defd6
EH
3690
3691 return 0;
17d176a5
EH
3692}
3693
f83a9991
EH
3694static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3695 void *value, int *size)
3696{
b8a55591 3697 struct amdgpu_device *adev = hwmgr->adev;
c11d8afe 3698 uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
690dc626 3699 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3700 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3701 int ret = 0;
b8a55591 3702 uint32_t val_vid;
f83a9991
EH
3703
3704 switch (idx) {
3705 case AMDGPU_PP_SENSOR_GFX_SCLK:
c11d8afe
EQ
3706 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3707 sclk_mhz = smum_get_argument(hwmgr);
3708 *((uint32_t *)value) = sclk_mhz * 100;
f83a9991
EH
3709 break;
3710 case AMDGPU_PP_SENSOR_GFX_MCLK:
952e5daa 3711 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 3712 mclk_idx = smum_get_argument(hwmgr);
952e5daa 3713 if (mclk_idx < dpm_table->mem_table.count) {
f83a9991
EH
3714 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3715 *size = 4;
952e5daa
RZ
3716 } else {
3717 ret = -EINVAL;
f83a9991
EH
3718 }
3719 break;
3720 case AMDGPU_PP_SENSOR_GPU_LOAD:
952e5daa 3721 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3f9ca14a 3722 activity_percent = smum_get_argument(hwmgr);
952e5daa
RZ
3723 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3724 *size = 4;
f83a9991
EH
3725 break;
3726 case AMDGPU_PP_SENSOR_GPU_TEMP:
3727 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3728 *size = 4;
3729 break;
3730 case AMDGPU_PP_SENSOR_UVD_POWER:
3731 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3732 *size = 4;
3733 break;
3734 case AMDGPU_PP_SENSOR_VCE_POWER:
3735 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3736 *size = 4;
3737 break;
17d176a5 3738 case AMDGPU_PP_SENSOR_GPU_POWER:
5b79d048 3739 ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
17d176a5 3740 break;
59655cb6 3741 case AMDGPU_PP_SENSOR_VDDGFX:
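		/* Read back the live GFX voltage: extract the SVI0 plane-0 VID field
		 * from the SMUIO register below and convert it to a VDDC reading via
		 * convert_to_vddc(). */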
b8a55591 3742 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
59655cb6
RZ
3743 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3744 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3745 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3746 return 0;
1f6c52ed
AD
3747 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3748 ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
3749 if (!ret)
3750 *size = 8;
3751 break;
f83a9991
EH
3752 default:
3753 ret = -EINVAL;
3754 break;
3755 }
6390258a 3756
f83a9991
EH
3757 return ret;
3758}
3759
d246cd53 3760static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
f83a9991
EH
3761 bool has_disp)
3762{
d246cd53 3763 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3764 PPSMC_MSG_SetUclkFastSwitch,
f132d561 3765 has_disp ? 1 : 0);
f83a9991
EH
3766}
3767
3768int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3769 struct pp_display_clock_request *clock_req)
3770{
3771 int result = 0;
3772 enum amd_pp_clock_type clk_type = clock_req->clock_type;
75f0e32b 3773 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
f83a9991
EH
3774 DSPCLK_e clk_select = 0;
3775 uint32_t clk_request = 0;
3776
3777 switch (clk_type) {
3778 case amd_pp_dcef_clock:
3779 clk_select = DSPCLK_DCEFCLK;
3780 break;
3781 case amd_pp_disp_clock:
3782 clk_select = DSPCLK_DISPCLK;
3783 break;
3784 case amd_pp_pixel_clock:
3785 clk_select = DSPCLK_PIXCLK;
3786 break;
3787 case amd_pp_phy_clock:
3788 clk_select = DSPCLK_PHYCLK;
3789 break;
3790 default:
 3791                 pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
3792 result = -1;
3793 break;
3794 }
3795
3796 if (!result) {
3797 clk_request = (clk_freq << 16) | clk_select;
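		/* The message parameter packs the request into a single dword: the
		 * target frequency in MHz (clock_freq_in_khz / 1000) in the upper
		 * 16 bits and the DSPCLK_e selector in the lower 16 bits. */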
d246cd53 3798 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3799 PPSMC_MSG_RequestDisplayClockByFreq,
3800 clk_request);
3801 }
3802
3803 return result;
3804}
3805
75f0e32b
RZ
3806static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3807 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3808 uint32_t frequency)
3809{
3810 uint8_t count;
3811 uint8_t i;
3812
3813 if (mclk_table == NULL || mclk_table->count == 0)
3814 return 0;
3815
3816 count = (uint8_t)(mclk_table->count);
3817
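	/* Return the lowest UCLK DPM index whose clock meets or exceeds the
	 * requested frequency; if every entry is below it, fall back to the
	 * highest index (count - 1). */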
 3818         for (i = 0; i < count; i++) {
 3819                 if (mclk_table->entries[i].clk >= frequency)
3820 return i;
3821 }
3822
 3823         return i - 1;
3824}
3825
f83a9991
EH
3826static int vega10_notify_smc_display_config_after_ps_adjustment(
3827 struct pp_hwmgr *hwmgr)
3828{
690dc626 3829 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3830 struct vega10_single_dpm_table *dpm_table =
3831 &data->dpm_table.dcef_table;
75f0e32b
RZ
3832 struct phm_ppt_v2_information *table_info =
3833 (struct phm_ppt_v2_information *)hwmgr->pptable;
3834 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3835 uint32_t idx;
f83a9991
EH
3836 struct PP_Clocks min_clocks = {0};
3837 uint32_t i;
3838 struct pp_display_clock_request clock_req;
3839
59a8348f 3840 if ((hwmgr->display_config->num_display > 1) &&
92859e0d
EQ
3841 !hwmgr->display_config->multi_monitor_in_sync &&
3842 !hwmgr->display_config->nb_pstate_switch_disable)
f83a9991
EH
3843 vega10_notify_smc_display_change(hwmgr, false);
3844 else
3845 vega10_notify_smc_display_change(hwmgr, true);
3846
555fd70c
RZ
3847 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3848 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3849 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991
EH
3850
3851 for (i = 0; i < dpm_table->count; i++) {
3852 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3853 break;
3854 }
3855
3856 if (i < dpm_table->count) {
3857 clock_req.clock_type = amd_pp_dcef_clock;
ed092664 3858 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
f83a9991 3859 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
d246cd53 3860 smum_send_msg_to_smc_with_parameter(
d3f8c0ab 3861 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
d246cd53 3862 min_clocks.dcefClockInSR / 100);
75f0e32b 3863 } else {
f83a9991 3864 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
75f0e32b
RZ
3865 }
3866 } else {
5bbc5c64 3867 pr_debug("Cannot find requested DCEFCLK!");
75f0e32b
RZ
3868 }
3869
3870 if (min_clocks.memoryClock != 0) {
3871 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
d3f8c0ab 3872 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
75f0e32b
RZ
3873 data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
3874 }
f83a9991
EH
3875
3876 return 0;
3877}
3878
3879static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3880{
690dc626 3881 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3882
3883 data->smc_state_table.gfx_boot_level =
3884 data->smc_state_table.gfx_max_level =
3885 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3886 data->smc_state_table.mem_boot_level =
3887 data->smc_state_table.mem_max_level =
3888 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3889
3890 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3891 "Failed to upload boot level to highest!",
3892 return -1);
3893
3894 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3895 "Failed to upload dpm max level to highest!",
3896 return -1);
3897
3898 return 0;
3899}
3900
3901static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3902{
690dc626 3903 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3904
3905 data->smc_state_table.gfx_boot_level =
3906 data->smc_state_table.gfx_max_level =
3907 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3908 data->smc_state_table.mem_boot_level =
3909 data->smc_state_table.mem_max_level =
3910 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3911
3912 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
 3913                         "Failed to upload boot level to lowest!",
3914 return -1);
3915
3916 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
 3917                         "Failed to upload dpm max level to lowest!",
3918 return -1);
3919
3920 return 0;
3921
3922}
3923
3924static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3925{
690dc626 3926 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3927
3928 data->smc_state_table.gfx_boot_level =
3929 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3930 data->smc_state_table.gfx_max_level =
3931 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3932 data->smc_state_table.mem_boot_level =
3933 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3934 data->smc_state_table.mem_max_level =
3935 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3936
3937 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3938 "Failed to upload DPM Bootup Levels!",
3939 return -1);
3940
3941 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3942 "Failed to upload DPM Max Levels!",
3943 return -1);
3944 return 0;
3945}
3946
53a4b90d
RZ
3947static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3948 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
f83a9991 3949{
53a4b90d
RZ
3950 struct phm_ppt_v2_information *table_info =
3951 (struct phm_ppt_v2_information *)(hwmgr->pptable);
f83a9991 3952
53a4b90d
RZ
3953 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
3954 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
3955 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
3956 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
3957 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
3958 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
dd70949d
RZ
3959 hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
3960 hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
f83a9991
EH
3961 }
3962
53a4b90d
RZ
3963 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3964 *sclk_mask = 0;
3965 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3966 *mclk_mask = 0;
3967 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3968 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3969 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
3970 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
3971 }
3972 return 0;
f83a9991
EH
3973}
3974
f93f0c3a 3975static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
f83a9991 3976{
7522ffc4
RZ
3977 switch (mode) {
3978 case AMD_FAN_CTRL_NONE:
f93f0c3a 3979 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
7522ffc4
RZ
3980 break;
3981 case AMD_FAN_CTRL_MANUAL:
dd5a6fe2 3982 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 3983 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
7522ffc4
RZ
3984 break;
3985 case AMD_FAN_CTRL_AUTO:
710931c2 3986 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 3987 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
7522ffc4
RZ
3988 break;
3989 default:
3990 break;
3991 }
f83a9991
EH
3992}
3993
29ae1118
RZ
3994static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
3995 enum pp_clock_type type, uint32_t mask)
3996{
3997 struct vega10_hwmgr *data = hwmgr->backend;
3998
3999 switch (type) {
4000 case PP_SCLK:
4001 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4002 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
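		/* The user-supplied bitmask of allowed levels is collapsed into a
		 * [lowest set bit, highest set bit] soft range; e.g. mask 0x0c keeps
		 * levels 2 through 3. */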
4003
4004 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4005 "Failed to upload boot level to lowest!",
4006 return -EINVAL);
4007
4008 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4009 "Failed to upload dpm max level to highest!",
4010 return -EINVAL);
4011 break;
4012
4013 case PP_MCLK:
4014 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4015 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
4016
4017 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4018 "Failed to upload boot level to lowest!",
4019 return -EINVAL);
4020
4021 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4022 "Failed to upload dpm max level to highest!",
4023 return -EINVAL);
4024
4025 break;
4026
4027 case PP_PCIE:
4028 default:
4029 break;
4030 }
4031
4032 return 0;
4033}
4034
53a4b90d
RZ
4035static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4036 enum amd_dpm_forced_level level)
4037{
4038 int ret = 0;
4039 uint32_t sclk_mask = 0;
4040 uint32_t mclk_mask = 0;
4041 uint32_t soc_mask = 0;
53a4b90d 4042
dd70949d
RZ
4043 if (hwmgr->pstate_sclk == 0)
4044 vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4045
53a4b90d
RZ
4046 switch (level) {
4047 case AMD_DPM_FORCED_LEVEL_HIGH:
4048 ret = vega10_force_dpm_highest(hwmgr);
53a4b90d
RZ
4049 break;
4050 case AMD_DPM_FORCED_LEVEL_LOW:
4051 ret = vega10_force_dpm_lowest(hwmgr);
53a4b90d
RZ
4052 break;
4053 case AMD_DPM_FORCED_LEVEL_AUTO:
4054 ret = vega10_unforce_dpm_levels(hwmgr);
53a4b90d
RZ
4055 break;
4056 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4057 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4058 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4059 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4060 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4061 if (ret)
4062 return ret;
53a4b90d
RZ
4063 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4064 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4065 break;
4066 case AMD_DPM_FORCED_LEVEL_MANUAL:
53a4b90d
RZ
4067 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4068 default:
4069 break;
4070 }
4071
9947f704
RZ
4072 if (!ret) {
4073 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4074 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4075 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4076 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4077 }
9ac870c7 4078
9947f704 4079 return ret;
53a4b90d
RZ
4080}
4081
f93f0c3a 4082static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
f83a9991 4083{
690dc626 4084 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4085
7522ffc4
RZ
 4086         if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
4087 return AMD_FAN_CTRL_MANUAL;
4088 else
4089 return AMD_FAN_CTRL_AUTO;
f83a9991
EH
4090}
4091
4092static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4093 struct amd_pp_simple_clock_info *info)
4094{
4095 struct phm_ppt_v2_information *table_info =
4096 (struct phm_ppt_v2_information *)hwmgr->pptable;
4097 struct phm_clock_and_voltage_limits *max_limits =
4098 &table_info->max_clock_voltage_on_ac;
4099
4100 info->engine_max_clock = max_limits->sclk;
4101 info->memory_max_clock = max_limits->mclk;
4102
4103 return 0;
4104}
4105
4106static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4107 struct pp_clock_levels_with_latency *clocks)
4108{
4109 struct phm_ppt_v2_information *table_info =
4110 (struct phm_ppt_v2_information *)hwmgr->pptable;
4111 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4112 table_info->vdd_dep_on_sclk;
4113 uint32_t i;
4114
23ec3d14 4115 clocks->num_levels = 0;
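	/* The dependency-table clocks appear to be stored in 10 kHz units, so the
	 * "* 10" below yields kHz; this matches the "value / 100" MHz conversions
	 * used when printing clock levels elsewhere in this file. */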
f83a9991
EH
4116 for (i = 0; i < dep_table->count; i++) {
4117 if (dep_table->entries[i].clk) {
4118 clocks->data[clocks->num_levels].clocks_in_khz =
23ec3d14 4119 dep_table->entries[i].clk * 10;
f83a9991
EH
4120 clocks->num_levels++;
4121 }
4122 }
4123
4124}
4125
f83a9991
EH
4126static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4127 struct pp_clock_levels_with_latency *clocks)
4128{
4129 struct phm_ppt_v2_information *table_info =
4130 (struct phm_ppt_v2_information *)hwmgr->pptable;
4131 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4132 table_info->vdd_dep_on_mclk;
690dc626 4133 struct vega10_hwmgr *data = hwmgr->backend;
23ec3d14 4134 uint32_t j = 0;
f83a9991
EH
4135 uint32_t i;
4136
f83a9991
EH
4137 for (i = 0; i < dep_table->count; i++) {
4138 if (dep_table->entries[i].clk) {
6eb9d603 4139
23ec3d14
RZ
4140 clocks->data[j].clocks_in_khz =
4141 dep_table->entries[i].clk * 10;
4142 data->mclk_latency_table.entries[j].frequency =
4143 dep_table->entries[i].clk;
4144 clocks->data[j].latency_in_us =
6eb9d603 4145 data->mclk_latency_table.entries[j].latency = 25;
23ec3d14 4146 j++;
f83a9991
EH
4147 }
4148 }
23ec3d14 4149 clocks->num_levels = data->mclk_latency_table.count = j;
f83a9991
EH
4150}
4151
4152static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4153 struct pp_clock_levels_with_latency *clocks)
4154{
4155 struct phm_ppt_v2_information *table_info =
4156 (struct phm_ppt_v2_information *)hwmgr->pptable;
4157 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4158 table_info->vdd_dep_on_dcefclk;
4159 uint32_t i;
4160
4161 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4162 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4163 clocks->data[i].latency_in_us = 0;
4164 clocks->num_levels++;
4165 }
4166}
4167
4168static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4169 struct pp_clock_levels_with_latency *clocks)
4170{
4171 struct phm_ppt_v2_information *table_info =
4172 (struct phm_ppt_v2_information *)hwmgr->pptable;
4173 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4174 table_info->vdd_dep_on_socclk;
4175 uint32_t i;
4176
4177 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4178 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4179 clocks->data[i].latency_in_us = 0;
4180 clocks->num_levels++;
4181 }
4182}
4183
4184static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4185 enum amd_pp_clock_type type,
4186 struct pp_clock_levels_with_latency *clocks)
4187{
4188 switch (type) {
4189 case amd_pp_sys_clock:
4190 vega10_get_sclks(hwmgr, clocks);
4191 break;
4192 case amd_pp_mem_clock:
4193 vega10_get_memclocks(hwmgr, clocks);
4194 break;
4195 case amd_pp_dcef_clock:
4196 vega10_get_dcefclocks(hwmgr, clocks);
4197 break;
4198 case amd_pp_soc_clock:
4199 vega10_get_socclocks(hwmgr, clocks);
4200 break;
4201 default:
4202 return -1;
4203 }
4204
4205 return 0;
4206}
4207
4208static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4209 enum amd_pp_clock_type type,
4210 struct pp_clock_levels_with_voltage *clocks)
4211{
4212 struct phm_ppt_v2_information *table_info =
4213 (struct phm_ppt_v2_information *)hwmgr->pptable;
4214 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4215 uint32_t i;
4216
4217 switch (type) {
4218 case amd_pp_mem_clock:
4219 dep_table = table_info->vdd_dep_on_mclk;
4220 break;
4221 case amd_pp_dcef_clock:
4222 dep_table = table_info->vdd_dep_on_dcefclk;
4223 break;
4224 case amd_pp_disp_clock:
4225 dep_table = table_info->vdd_dep_on_dispclk;
4226 break;
4227 case amd_pp_pixel_clock:
4228 dep_table = table_info->vdd_dep_on_pixclk;
4229 break;
4230 case amd_pp_phy_clock:
4231 dep_table = table_info->vdd_dep_on_phyclk;
4232 break;
4233 default:
4234 return -1;
4235 }
4236
4237 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4238 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4239 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4240 entries[dep_table->entries[i].vddInd].us_vdd);
4241 clocks->num_levels++;
4242 }
4243
4244 if (i < dep_table->count)
4245 return -1;
4246
4247 return 0;
4248}
4249
4250static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
99c5e27d 4251 void *clock_range)
f83a9991 4252{
690dc626 4253 struct vega10_hwmgr *data = hwmgr->backend;
20582319 4254 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
f83a9991
EH
4255 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4256 int result = 0;
f83a9991
EH
4257
4258 if (!data->registry_data.disable_water_mark) {
63c2f7ed 4259 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
f83a9991
EH
4260 data->water_marks_bitmap = WaterMarksExist;
4261 }
4262
4263 return result;
4264}
4265
f83a9991
EH
4266static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4267 enum pp_clock_type type, char *buf)
4268{
690dc626 4269 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4270 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4271 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4272 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
c5a44849
RZ
4273 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4274
f83a9991
EH
4275 int i, now, size = 0;
4276
4277 switch (type) {
4278 case PP_SCLK:
4279 if (data->registry_data.sclk_dpm_key_disabled)
4280 break;
4281
d246cd53 4282 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3f9ca14a 4283 now = smum_get_argument(hwmgr);
f83a9991
EH
4284
4285 for (i = 0; i < sclk_table->count; i++)
4286 size += sprintf(buf + size, "%d: %uMhz %s\n",
4287 i, sclk_table->dpm_levels[i].value / 100,
4288 (i == now) ? "*" : "");
4289 break;
4290 case PP_MCLK:
4291 if (data->registry_data.mclk_dpm_key_disabled)
4292 break;
4293
d246cd53 4294 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 4295 now = smum_get_argument(hwmgr);
f83a9991
EH
4296
4297 for (i = 0; i < mclk_table->count; i++)
4298 size += sprintf(buf + size, "%d: %uMhz %s\n",
4299 i, mclk_table->dpm_levels[i].value / 100,
4300 (i == now) ? "*" : "");
4301 break;
4302 case PP_PCIE:
d246cd53 4303 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
3f9ca14a 4304 now = smum_get_argument(hwmgr);
f83a9991
EH
4305
4306 for (i = 0; i < pcie_table->count; i++)
4307 size += sprintf(buf + size, "%d: %s %s\n", i,
7413d2fa
EQ
4308 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4309 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4310 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
f83a9991
EH
4311 (i == now) ? "*" : "");
4312 break;
c5a44849
RZ
4313 case OD_SCLK:
4314 if (hwmgr->od_enabled) {
4315 size = sprintf(buf, "%s:\n", "OD_SCLK");
4316 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4317 for (i = 0; i < podn_vdd_dep->count; i++)
4318 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4319 i, podn_vdd_dep->entries[i].clk / 100,
4320 podn_vdd_dep->entries[i].vddc);
4321 }
4322 break;
4323 case OD_MCLK:
4324 if (hwmgr->od_enabled) {
4325 size = sprintf(buf, "%s:\n", "OD_MCLK");
4326 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4327 for (i = 0; i < podn_vdd_dep->count; i++)
4328 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4329 i, podn_vdd_dep->entries[i].clk/100,
4330 podn_vdd_dep->entries[i].vddc);
4331 }
4332 break;
4333 case OD_RANGE:
4334 if (hwmgr->od_enabled) {
4335 size = sprintf(buf, "%s:\n", "OD_RANGE");
4336 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4337 data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4338 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4339 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4340 data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4341 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4342 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4343 data->odn_dpm_table.min_vddc,
4344 data->odn_dpm_table.max_vddc);
4345 }
4346 break;
f83a9991
EH
4347 default:
4348 break;
4349 }
4350 return size;
4351}
4352
4353static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4354{
690dc626 4355 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4356 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
555fd70c 4357 int result = 0;
f83a9991
EH
4358
4359 if ((data->water_marks_bitmap & WaterMarksExist) &&
4360 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3f9ca14a 4361 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
f83a9991
EH
 4362                 PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4363 data->water_marks_bitmap |= WaterMarksLoaded;
4364 }
4365
4366 if (data->water_marks_bitmap & WaterMarksLoaded) {
d3f8c0ab 4367 smum_send_msg_to_smc_with_parameter(hwmgr,
555fd70c 4368 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
f83a9991
EH
4369 }
4370
4371 return result;
4372}
4373
4374int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4375{
690dc626 4376 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4377
4378 if (data->smu_features[GNLD_DPM_UVD].supported) {
d3f8c0ab 4379 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
4380 enable,
4381 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4382 "Attempt to Enable/Disable DPM UVD Failed!",
4383 return -1);
4384 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4385 }
4386 return 0;
4387}
4388
f93f0c3a 4389static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4390{
690dc626 4391 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4392
4393 data->vce_power_gated = bgate;
f93f0c3a 4394 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
f83a9991
EH
4395}
4396
f93f0c3a 4397static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4398{
690dc626 4399 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4400
4401 data->uvd_power_gated = bgate;
f93f0c3a 4402 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
f83a9991
EH
4403}
4404
4405static inline bool vega10_are_power_levels_equal(
4406 const struct vega10_performance_level *pl1,
4407 const struct vega10_performance_level *pl2)
4408{
4409 return ((pl1->soc_clock == pl2->soc_clock) &&
4410 (pl1->gfx_clock == pl2->gfx_clock) &&
4411 (pl1->mem_clock == pl2->mem_clock));
4412}
4413
4414static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4415 const struct pp_hw_power_state *pstate1,
4416 const struct pp_hw_power_state *pstate2, bool *equal)
4417{
4418 const struct vega10_power_state *psa;
4419 const struct vega10_power_state *psb;
4420 int i;
4421
4422 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4423 return -EINVAL;
4424
4425 psa = cast_const_phw_vega10_power_state(pstate1);
4426 psb = cast_const_phw_vega10_power_state(pstate2);
 4427         /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
4428 if (psa->performance_level_count != psb->performance_level_count) {
4429 *equal = false;
4430 return 0;
4431 }
4432
4433 for (i = 0; i < psa->performance_level_count; i++) {
4434 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
 4435                 /* If we have found even one performance level pair that is different, the states are different. */
4436 *equal = false;
4437 return 0;
4438 }
4439 }
4440
 4441         /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4442 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4443 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4444 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4445
4446 return 0;
4447}
4448
4449static bool
4450vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4451{
690dc626 4452 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4453 bool is_update_required = false;
f83a9991 4454
555fd70c 4455 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
f83a9991
EH
4456 is_update_required = true;
4457
dd5a6fe2 4458 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
555fd70c 4459 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
f83a9991
EH
4460 is_update_required = true;
4461 }
4462
4463 return is_update_required;
4464}
4465
8b9242ed
RZ
4466static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4467{
4468 int tmp_result, result = 0;
4469
dd5a6fe2 4470 if (PP_CAP(PHM_PlatformCaps_ThermalController))
8b9242ed
RZ
4471 vega10_disable_thermal_protection(hwmgr);
4472
4473 tmp_result = vega10_disable_power_containment(hwmgr);
4474 PP_ASSERT_WITH_CODE((tmp_result == 0),
4475 "Failed to disable power containment!", result = tmp_result);
4476
9b7b8154
EQ
4477 tmp_result = vega10_disable_didt_config(hwmgr);
4478 PP_ASSERT_WITH_CODE((tmp_result == 0),
4479 "Failed to disable didt config!", result = tmp_result);
4480
8b9242ed
RZ
4481 tmp_result = vega10_avfs_enable(hwmgr, false);
4482 PP_ASSERT_WITH_CODE((tmp_result == 0),
4483 "Failed to disable AVFS!", result = tmp_result);
4484
4485 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4486 PP_ASSERT_WITH_CODE((tmp_result == 0),
4487 "Failed to stop DPM!", result = tmp_result);
4488
df057e02
RZ
4489 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4490 PP_ASSERT_WITH_CODE((tmp_result == 0),
4491 "Failed to disable deep sleep!", result = tmp_result);
4492
4022e4f2
RZ
4493 tmp_result = vega10_disable_ulv(hwmgr);
4494 PP_ASSERT_WITH_CODE((tmp_result == 0),
4495 "Failed to disable ulv!", result = tmp_result);
4496
bdb8cd10
RZ
4497 tmp_result = vega10_acg_disable(hwmgr);
4498 PP_ASSERT_WITH_CODE((tmp_result == 0),
4499 "Failed to disable acg!", result = tmp_result);
15826fbf
RZ
4500
4501 vega10_enable_disable_PCC_limit_feature(hwmgr, false);
8b9242ed
RZ
4502 return result;
4503}
4504
4505static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4506{
690dc626 4507 struct vega10_hwmgr *data = hwmgr->backend;
8b9242ed
RZ
4508 int result;
4509
4510 result = vega10_disable_dpm_tasks(hwmgr);
4511 PP_ASSERT_WITH_CODE((0 == result),
4512 "[disable_dpm_tasks] Failed to disable DPM!",
4513 );
4514 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4515
4516 return result;
4517}
4518
dd4e2237
EH
4519static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4520{
690dc626 4521 struct vega10_hwmgr *data = hwmgr->backend;
dd4e2237
EH
4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4523 struct vega10_single_dpm_table *golden_sclk_table =
4524 &(data->golden_dpm_table.gfx_table);
a4233cc9
GJ
4525 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4526 int golden_value = golden_sclk_table->dpm_levels
dd4e2237
EH
4527 [golden_sclk_table->count - 1].value;
4528
a4233cc9
GJ
4529 value -= golden_value;
4530 value = DIV_ROUND_UP(value * 100, golden_value);
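	/* Overdrive percentage = ceil((current_top - golden_top) * 100 / golden_top).
	 * With illustrative numbers: golden top 152500 (1525 MHz) raised to 160000
	 * (1600 MHz) gives DIV_ROUND_UP(750000, 152500) = 5, i.e. +5%. */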
4531
dd4e2237
EH
4532 return value;
4533}
4534
4535static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4536{
690dc626 4537 struct vega10_hwmgr *data = hwmgr->backend;
dd4e2237
EH
4538 struct vega10_single_dpm_table *golden_sclk_table =
4539 &(data->golden_dpm_table.gfx_table);
4540 struct pp_power_state *ps;
4541 struct vega10_power_state *vega10_ps;
4542
4543 ps = hwmgr->request_ps;
4544
4545 if (ps == NULL)
4546 return -EINVAL;
4547
4548 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4549
4550 vega10_ps->performance_levels
4551 [vega10_ps->performance_level_count - 1].gfx_clock =
4552 golden_sclk_table->dpm_levels
4553 [golden_sclk_table->count - 1].value *
4554 value / 100 +
4555 golden_sclk_table->dpm_levels
4556 [golden_sclk_table->count - 1].value;
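	/* i.e. new top gfx clock = golden top level * (1 + value / 100); a value
	 * of 5 raises the highest performance level 5% above the golden table.
	 * The check below clamps the result to the VBIOS overdrive limit. */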
4557
4558 if (vega10_ps->performance_levels
4559 [vega10_ps->performance_level_count - 1].gfx_clock >
a4c3f247 4560 hwmgr->platform_descriptor.overdriveLimit.engineClock) {
dd4e2237
EH
4561 vega10_ps->performance_levels
4562 [vega10_ps->performance_level_count - 1].gfx_clock =
4563 hwmgr->platform_descriptor.overdriveLimit.engineClock;
a4c3f247
RZ
4564 pr_warn("max sclk supported by vbios is %d\n",
4565 hwmgr->platform_descriptor.overdriveLimit.engineClock);
4566 }
dd4e2237
EH
4567 return 0;
4568}
4569
4570static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4571{
690dc626 4572 struct vega10_hwmgr *data = hwmgr->backend;
dd4e2237
EH
4573 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4574 struct vega10_single_dpm_table *golden_mclk_table =
4575 &(data->golden_dpm_table.mem_table);
a4233cc9
GJ
4576 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4577 int golden_value = golden_mclk_table->dpm_levels
dd4e2237
EH
4578 [golden_mclk_table->count - 1].value;
4579
a4233cc9
GJ
4580 value -= golden_value;
4581 value = DIV_ROUND_UP(value * 100, golden_value);
4582
dd4e2237
EH
4583 return value;
4584}
4585
4586static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4587{
690dc626 4588 struct vega10_hwmgr *data = hwmgr->backend;
dd4e2237
EH
4589 struct vega10_single_dpm_table *golden_mclk_table =
4590 &(data->golden_dpm_table.mem_table);
4591 struct pp_power_state *ps;
4592 struct vega10_power_state *vega10_ps;
4593
4594 ps = hwmgr->request_ps;
4595
4596 if (ps == NULL)
4597 return -EINVAL;
4598
4599 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4600
4601 vega10_ps->performance_levels
4602 [vega10_ps->performance_level_count - 1].mem_clock =
4603 golden_mclk_table->dpm_levels
4604 [golden_mclk_table->count - 1].value *
4605 value / 100 +
4606 golden_mclk_table->dpm_levels
4607 [golden_mclk_table->count - 1].value;
4608
4609 if (vega10_ps->performance_levels
4610 [vega10_ps->performance_level_count - 1].mem_clock >
a4c3f247 4611 hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
dd4e2237
EH
4612 vega10_ps->performance_levels
4613 [vega10_ps->performance_level_count - 1].mem_clock =
4614 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
a4c3f247
RZ
4615 pr_warn("max mclk supported by vbios is %d\n",
4616 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4617 }
dd4e2237
EH
4618
4619 return 0;
4620}
8b9242ed 4621
52afb85e
RZ
4622static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4623 uint32_t virtual_addr_low,
4624 uint32_t virtual_addr_hi,
4625 uint32_t mc_addr_low,
4626 uint32_t mc_addr_hi,
4627 uint32_t size)
4628{
4629 smum_send_msg_to_smc_with_parameter(hwmgr,
4630 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4631 virtual_addr_hi);
4632 smum_send_msg_to_smc_with_parameter(hwmgr,
4633 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4634 virtual_addr_low);
4635 smum_send_msg_to_smc_with_parameter(hwmgr,
4636 PPSMC_MSG_DramLogSetDramAddrHigh,
4637 mc_addr_hi);
4638
4639 smum_send_msg_to_smc_with_parameter(hwmgr,
4640 PPSMC_MSG_DramLogSetDramAddrLow,
4641 mc_addr_low);
4642
4643 smum_send_msg_to_smc_with_parameter(hwmgr,
4644 PPSMC_MSG_DramLogSetDramSize,
4645 size);
4646 return 0;
4647}
4648
0a91ee07
EQ
4649static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4650 struct PP_TemperatureRange *thermal_data)
4651{
4652 struct phm_ppt_v2_information *table_info =
4653 (struct phm_ppt_v2_information *)hwmgr->pptable;
4654
4655 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4656
4657 thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
4658 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4659
4660 return 0;
4661}
4662
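/*
 * Print the built-in power profile heuristics (busy set point, FPS,
 * use_rlc_busy, min_active_level) plus the user-defined CUSTOM profile,
 * marking the currently active mode with '*'.
 */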
6390258a
RZ
4663static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4664{
690dc626 4665 struct vega10_hwmgr *data = hwmgr->backend;
6390258a
RZ
4666 uint32_t i, size = 0;
4667 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
4668 {90, 60, 0, 0,},
4669 {70, 60, 0, 0,},
4670 {70, 90, 0, 0,},
4671 {30, 60, 0, 6,},
4672 };
4673 static const char *profile_name[6] = {"3D_FULL_SCREEN",
4674 "POWER_SAVING",
4675 "VIDEO",
4676 "VR",
04f618eb 4677 "COMPUTE",
6390258a
RZ
4678 "CUSTOM"};
4679 static const char *title[6] = {"NUM",
4680 "MODE_NAME",
4681 "BUSY_SET_POINT",
4682 "FPS",
4683 "USE_RLC_BUSY",
4684 "MIN_ACTIVE_LEVEL"};
4685
4686 if (!buf)
4687 return -EINVAL;
4688
4689 	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
4690 title[1], title[2], title[3], title[4], title[5]);
4691
4692 for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4693 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4694 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4695 profile_mode_setting[i][0], profile_mode_setting[i][1],
4696 profile_mode_setting[i][2], profile_mode_setting[i][3]);
4697 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4698 profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4699 data->custom_profile_mode[0], data->custom_profile_mode[1],
4700 data->custom_profile_mode[2], data->custom_profile_mode[3]);
4701 return size;
4702}
4703
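/*
 * Select the active workload via PPSMC_MSG_SetWorkloadMask; for the CUSTOM
 * profile the four heuristic parameters are packed into one dword and sent
 * with PPSMC_MSG_SetCustomGfxDpmParameters.
 */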
4704static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4705{
690dc626 4706 struct vega10_hwmgr *data = hwmgr->backend;
6390258a
RZ
4707 uint8_t busy_set_point;
4708 uint8_t FPS;
4709 uint8_t use_rlc_busy;
4710 uint8_t min_active_level;
4711
6390258a
RZ
4712 hwmgr->power_profile_mode = input[size];
4713
4714 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4715 1<<hwmgr->power_profile_mode);
4716
4717 if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4718 if (size == 0 || size > 4)
4719 return -EINVAL;
4720
4721 data->custom_profile_mode[0] = busy_set_point = input[0];
4722 data->custom_profile_mode[1] = FPS = input[1];
4723 data->custom_profile_mode[2] = use_rlc_busy = input[2];
4724 data->custom_profile_mode[3] = min_active_level = input[3];
4725 smum_send_msg_to_smc_with_parameter(hwmgr,
4726 PPSMC_MSG_SetCustomGfxDpmParameters,
4727 busy_set_point | FPS<<8 |
4728 use_rlc_busy << 16 | min_active_level<<24);
6390258a
RZ
4729 }
4730
4731 return 0;
4732}
4733
c5a44849
RZ
4734
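/*
 * Validate a user-supplied OD (clock, voltage) pair: the voltage must fall
 * inside the ODN vddc window, and the clock between the lowest golden DPM
 * level and the VBIOS overdrive limit.
 */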
4735static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4736 enum PP_OD_DPM_TABLE_COMMAND type,
4737 uint32_t clk,
4738 uint32_t voltage)
4739{
4740 struct vega10_hwmgr *data = hwmgr->backend;
4741 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4742 struct vega10_single_dpm_table *golden_table;
4743
4744 if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4745 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4746 return false;
4747 }
4748
4749 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4750 golden_table = &(data->golden_dpm_table.gfx_table);
4751 if (golden_table->dpm_levels[0].value > clk ||
4752 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4753 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4754 golden_table->dpm_levels[0].value/100,
4755 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4756 return false;
4757 }
4758 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4759 golden_table = &(data->golden_dpm_table.mem_table);
4760 if (golden_table->dpm_levels[0].value > clk ||
4761 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4762 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4763 golden_table->dpm_levels[0].value/100,
4764 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4765 return false;
4766 }
4767 } else {
4768 return false;
4769 }
4770
4771 return true;
4772}
4773
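/*
 * After an sclk/mclk OD edit, propagate the new clocks and voltages into the
 * ODN vddc lookup table and the soc clock dependency table so that socclk
 * and its voltage keep up with the overdriven levels.
 */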
c5a44849
RZ
4774static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
4775 enum PP_OD_DPM_TABLE_COMMAND type)
4776{
4777 struct vega10_hwmgr *data = hwmgr->backend;
4778 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4779 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
4780 struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
4781
4782 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
4783 &data->odn_dpm_table.vdd_dep_on_socclk;
4784 struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
4785
4786 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
4787 uint8_t i, j;
4788
4789 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4790 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4791 for (i = 0; i < podn_vdd_dep->count - 1; i++)
4792 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4793 if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
4794 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4795 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4796 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4797 for (i = 0; i < dpm_table->count; i++) {
4798 for (j = 0; j < od_vddc_lookup_table->count; j++) {
4799 if (od_vddc_lookup_table->entries[j].us_vdd >
4800 podn_vdd_dep->entries[i].vddc)
4801 break;
4802 }
4803 if (j == od_vddc_lookup_table->count) {
4804 od_vddc_lookup_table->entries[j-1].us_vdd =
4805 podn_vdd_dep->entries[i].vddc;
4806 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4807 }
4808 podn_vdd_dep->entries[i].vddInd = j;
4809 }
4810 dpm_table = &data->dpm_table.soc_table;
4811 for (i = 0; i < dep_table->count; i++) {
4812 if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
4813 dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
4814 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4815 podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4816 dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
4817 }
4818 }
4819 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
4820 podn_vdd_dep->entries[dep_table->count-1].clk) {
4821 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4822 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4823 dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
4824 }
4825 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
4826 podn_vdd_dep->entries[dep_table->count-1].vddInd) {
4827 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4828 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
4829 }
4830 }
4831}
4832
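/*
 * Handler for pp_od_clk_voltage edits: 'input' holds (level, clock in MHz,
 * voltage in mV) triplets for SCLK/MCLK table edits, or selects a restore
 * or commit of the ODN tables.
 */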
4833static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4834 enum PP_OD_DPM_TABLE_COMMAND type,
4835 long *input, uint32_t size)
4836{
4837 struct vega10_hwmgr *data = hwmgr->backend;
4838 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
4839 struct vega10_single_dpm_table *dpm_table;
4840
4841 uint32_t input_clk;
4842 uint32_t input_vol;
4843 uint32_t input_level;
4844 uint32_t i;
4845
4846 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4847 return -EINVAL);
4848
4849 if (!hwmgr->od_enabled) {
4850 pr_info("OverDrive feature not enabled\n");
4851 return -EINVAL;
4852 }
4853
4854 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4855 dpm_table = &data->dpm_table.gfx_table;
4856 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
4857 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4858 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4859 dpm_table = &data->dpm_table.mem_table;
4860 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
4861 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4862 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4863 memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
4864 vega10_odn_initial_default_setting(hwmgr);
4865 return 0;
4866 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4867 vega10_check_dpm_table_updated(hwmgr);
4868 return 0;
4869 } else {
4870 return -EINVAL;
4871 }
4872
4873 for (i = 0; i < size; i += 3) {
4874 if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
4875 pr_info("invalid clock voltage input\n");
4876 return 0;
4877 }
4878 input_level = input[i];
4879 input_clk = input[i+1] * 100;
4880 input_vol = input[i+2];
4881
4882 if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4883 dpm_table->dpm_levels[input_level].value = input_clk;
4884 podn_vdd_dep_table->entries[input_level].clk = input_clk;
4885 podn_vdd_dep_table->entries[input_level].vddc = input_vol;
4886 } else {
4887 return -EINVAL;
4888 }
4889 }
4890 vega10_odn_update_soc_table(hwmgr, type);
4891 return 0;
4892}
4893
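/*
 * Report the gfx and memory clock of the requested performance level,
 * clamping 'index' to the last level of the power state.
 */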
f688b614
RZ
4894static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
4895 PHM_PerformanceLevelDesignation designation, uint32_t index,
4896 PHM_PerformanceLevel *level)
4897{
4898 const struct vega10_power_state *ps;
4899 struct vega10_hwmgr *data;
4900 uint32_t i;
4901
4902 if (level == NULL || hwmgr == NULL || state == NULL)
4903 return -EINVAL;
4904
4905 data = hwmgr->backend;
4906 ps = cast_const_phw_vega10_power_state(state);
4907
4908 i = index > ps->performance_level_count - 1 ?
4909 ps->performance_level_count - 1 : index;
4910
4911 level->coreClock = ps->performance_levels[i].gfx_clock;
4912 level->memory_clock = ps->performance_levels[i].mem_clock;
4913
4914 return 0;
4915}
4916
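/* Wire the vega10 implementations into the generic hwmgr callback table */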
f83a9991
EH
4917static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4918 .backend_init = vega10_hwmgr_backend_init,
4919 .backend_fini = vega10_hwmgr_backend_fini,
4920 .asic_setup = vega10_setup_asic_task,
4921 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
8b9242ed 4922 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
f83a9991
EH
4923 .get_num_of_pp_table_entries =
4924 vega10_get_number_of_powerplay_table_entries,
4925 .get_power_state_size = vega10_get_power_state_size,
4926 .get_pp_table_entry = vega10_get_pp_table_entry,
4927 .patch_boot_state = vega10_patch_boot_state,
4928 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4929 .power_state_set = vega10_set_power_state_tasks,
4930 .get_sclk = vega10_dpm_get_sclk,
4931 .get_mclk = vega10_dpm_get_mclk,
4932 .notify_smc_display_config_after_ps_adjustment =
4933 vega10_notify_smc_display_config_after_ps_adjustment,
4934 .force_dpm_level = vega10_dpm_force_dpm_level,
f83a9991
EH
4935 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4936 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4937 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4938 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4939 .reset_fan_speed_to_default =
4940 vega10_fan_ctrl_reset_fan_speed_to_default,
4941 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4942 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4943 .uninitialize_thermal_controller =
4944 vega10_thermal_ctrl_uninitialize_thermal_controller,
4945 .set_fan_control_mode = vega10_set_fan_control_mode,
4946 .get_fan_control_mode = vega10_get_fan_control_mode,
4947 .read_sensor = vega10_read_sensor,
4948 .get_dal_power_level = vega10_get_dal_power_level,
4949 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4950 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4951 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4952 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4953 .force_clock_level = vega10_force_clock_level,
4954 .print_clock_levels = vega10_print_clock_levels,
4955 .display_config_changed = vega10_display_configuration_changed_task,
4956 .powergate_uvd = vega10_power_gate_uvd,
4957 .powergate_vce = vega10_power_gate_vce,
4958 .check_states_equal = vega10_check_states_equal,
4959 .check_smc_update_required_for_display_configuration =
4960 vega10_check_smc_update_required_for_display_configuration,
8b9242ed
RZ
4961 .power_off_asic = vega10_power_off_asic,
4962 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
dd4e2237
EH
4963 .get_sclk_od = vega10_get_sclk_od,
4964 .set_sclk_od = vega10_set_sclk_od,
4965 .get_mclk_od = vega10_get_mclk_od,
4966 .set_mclk_od = vega10_set_mclk_od,
9d90f0bd 4967 .avfs_control = vega10_avfs_enable,
52afb85e 4968 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
0a91ee07 4969 .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
4d200372 4970 .register_irq_handlers = smu9_register_irq_handlers,
1ed05ff4 4971 .start_thermal_controller = vega10_start_thermal_controller,
6390258a
RZ
4972 .get_power_profile_mode = vega10_get_power_profile_mode,
4973 .set_power_profile_mode = vega10_set_power_profile_mode,
6ab8555e 4974 .set_power_limit = vega10_set_power_limit,
c5a44849 4975 .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
f688b614 4976 .get_performance_level = vega10_get_performance_level,
f83a9991
EH
4977};
4978
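/* Hook up the vega10 hwmgr callbacks and powerplay table parser for this asic */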
4979int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4980{
4981 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4982 hwmgr->pptable_func = &vega10_pptable_funcs;
1ab47204 4983
f83a9991
EH
4984 return 0;
4985}