/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

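/* amdgpu_dpm_set_powergating_by_smu - request powergate/ungate of an IP block
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* of the block to gate or ungate
 * @gate: true to gate (power down), false to ungate (power up)
 * @inst: instance index, currently only honored for VCN
 *
 * Caches the last requested power state per block and skips the SMU call
 * when the block is already in the target state, except for multi-instance
 * VCN where each instance must be forwarded to the SMU individually.
 */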
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

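/* amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success, -ENOENT if the powerplay backend does not
 * implement a BACO state callback. amdgpu_dpm_baco_exit() below is
 * the matching exit path.
 */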
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

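/* amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and immediately exits the BACO state under the pm mutex;
 * the enter/exit round trip resets the chip.
 */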
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

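/* amdgpu_dpm_switch_power_profile - enable/disable a power profile heuristic
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE workload type
 * @en: true to enable the profile, false to disable it
 *
 * No-op under SR-IOV virtualization, where the VF has no control
 * over workload profiles.
 */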
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

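/* amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power source event
 * @adev: amdgpu_device pointer
 *
 * Updates adev->pm.ac_power from the system power supply state and
 * notifies the BAPM callback (legacy powerplay) or the SMU (sw SMU
 * path) so power limits can be repicked for the new power source.
 */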
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

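/* amdgpu_dpm_compute_clocks - re-evaluate dpm clock and power state selection
 * @adev: amdgpu_device pointer
 *
 * Refreshes display bandwidth requirements and waits for all ready
 * rings to go idle before asking the powerplay backend to recompute
 * clocks for the current workload.
 */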
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

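/* amdgpu_pm_load_smu_firmware - load SMU firmware via the powerplay backend
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Skipped on APUs using the sw SMU path, where no separate firmware
 * load is required here.
 */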
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

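/* UMD pstate handling: entering a profiling level requires stable gfx
 * clocks, so gfx powergating and clockgating are ungated on entry and
 * re-gated on exit.
 */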
static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

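/* amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu_device pointer
 * @level: the AMD_DPM_FORCED_LEVEL_* to switch to
 *
 * Rejects the request while a thermal event is active, handles the
 * gfxoff quirk for original Raven, and unwinds the UMD pstate
 * transition if the backend rejects the new level.
 */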
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If the new level failed, retain the UMD state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

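/* amdgpu_dpm_is_overdrive_supported - query overdrive (OD) availability
 * @adev: amdgpu_device pointer
 *
 * On legacy dpm the pp_handle is the adev itself and carries no
 * od_enabled flag, so overdrive is reported as unsupported there.
 */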
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy ASICs doesn't carry the od_enabled member
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum pp_smu_status ret = PP_SMU_RESULT_FAIL;

	if (!pp_funcs->get_uclk_dpm_states)
		return PP_SMU_RESULT_UNSUPPORTED;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}