/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

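/* amdgpu_dpm_get_sclk - Query the graphics engine (shader) clock
 * @adev: amdgpu_device pointer
 * @low: if true, report the lowest supported clock, otherwise the current one
 *
 * Returns the clock, or 0 if the underlying powerplay handler is not
 * implemented.
 */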
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

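/* amdgpu_dpm_set_powergating_by_smu - Gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the IP block
 * @gate: true to power gate the block, false to ungate it
 *
 * The request is skipped if the block is already in the target power state;
 * on success the cached per-block power state is updated.
 */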
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

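/* amdgpu_dpm_baco_enter - Enter BACO (Bus Active, Chip Off) state
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success, -ENOENT if BACO is not supported by this ASIC.
 */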
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

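/* amdgpu_dpm_baco_reset - Reset the ASIC by entering and then exiting BACO
 * @adev: amdgpu_device pointer
 *
 * Both transitions happen under the pm mutex; the exit is skipped if the
 * entry fails.
 */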
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

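/* amdgpu_dpm_read_sensor - Read a power/thermal/clock sensor
 * @adev: amdgpu_device pointer
 * @sensor: which sensor to read (enum amd_pp_sensors)
 * @data: buffer receiving the sensor value
 * @size: size of @data in bytes
 */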
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

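/* amdgpu_dpm_compute_clocks - Re-evaluate clocks and power state
 * @adev: amdgpu_device pointer
 *
 * No-op when DPM is disabled or the backend provides no handler.
 */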
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

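/* amdgpu_pm_load_smu_firmware - Load the SMU firmware and report its version
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded firmware version
 */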
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

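/* amdgpu_dpm_set_power_state - Record the user-requested power state
 * @adev: amdgpu_device pointer
 * @state: requested amd_pm_state_type
 *
 * On SMU-based parts the state is only cached; legacy powerplay parts
 * dispatch a task (or recompute clocks) to apply it.
 */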
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

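/* amdgpu_dpm_force_performance_level - Force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: target amd_dpm_forced_level
 *
 * Handles the GFXOFF quirk for first-generation Raven and the gfx
 * clock/power gating transitions around UMD pstate entry and exit.
 */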
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

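/* amdgpu_dpm_get_power_limit - Query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the limit value
 * @pp_limit_level: which limit to query (e.g. current, min, max)
 * @power_type: which power domain the limit applies to
 */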
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}