/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

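/**
 * amdgpu_dpm_get_sclk - query the current engine (shader) clock
 * @adev: amdgpu_device pointer
 * @low: true to query the lowest DPM level, false for the highest
 *
 * Returns the clock reported by the power backend, or 0 when the
 * backend does not implement the query.
 */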
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

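/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* id of the block to transition
 * @gate: true to power-gate the block, false to ungate it
 *
 * The last requested state is cached in adev->pm.pwr_state, so asking
 * for a state the block is already in is a cheap no-op.
 */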
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

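/**
 * amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success, -ENOENT if the backend has no BACO support,
 * or another negative error code on failure.
 */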
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

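/**
 * amdgpu_dpm_is_baco_supported - query whether BACO is usable for reset
 * @adev: amdgpu_device pointer
 *
 * Returns a non-zero value if the backend reports BACO capability and
 * the device is not suspending to S3, 0 otherwise.
 */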
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;

	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

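/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and immediately exits BACO, which powers the chip down and
 * back up and thereby resets it.
 */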
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

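/**
 * amdgpu_dpm_switch_power_profile - enable or disable a power profile
 * @adev: amdgpu_device pointer
 * @type: PP_SMC_POWER_PROFILE_* workload profile to switch
 * @en: true to enable the profile, false to disable it
 *
 * No-op under SR-IOV, where the host owns power management.
 */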
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     enum pp_df_cstate cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

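/**
 * amdgpu_pm_acpi_event_handler - react to ACPI AC/DC power source events
 * @adev: amdgpu_device pointer
 *
 * Refreshes adev->pm.ac_power and forwards the new power source to the
 * backend (BAPM for legacy powerplay, smu_set_ac_dc() for the SMU).
 */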
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

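/**
 * amdgpu_dpm_compute_clocks - re-evaluate the DPM state
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements and waits for all rings to
 * drain before asking the backend to pick new clock levels.
 */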
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

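/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded firmware version
 *
 * APUs do not need the driver to load SMU firmware, so this is a
 * no-op for them.
 */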
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (adev->flags & AMD_IS_APU))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

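/**
 * amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: AMD_DPM_FORCED_LEVEL_* to force
 *
 * Handles the bookkeeping around the backend call: GFXOFF is disabled
 * while Raven (non-Raven2) parts sit in manual mode, and GFX clock/power
 * gating is released on entry to a profiling (UMD pstate) level and
 * re-enabled on exit.
 */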
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

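/**
 * amdgpu_dpm_get_power_limit - query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the limit value
 * @pp_limit_level: which limit to query (e.g. current, min, max)
 * @power_type: which power accounting type the limit applies to
 */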
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

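/**
 * amdgpu_dpm_is_overdrive_supported - check whether overdrive is enabled
 * @adev: amdgpu_device pointer
 *
 * Returns true when the SMU or powerplay backend reports overdrive
 * (manual over/underclocking) support, false otherwise.
 */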
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}