Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
Merge tag 'mips_5.2_2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_pm.c
index a7adb7b6bd98ab7b615a1d8c103551f2ffa46d1c..34471dbaa872ad9d7b18c669a95c2fcd758def83 100644 (file)
@@ -28,6 +28,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_display.h"
+#include "amdgpu_smu.h"
 #include "atom.h"
 #include <linux/power_supply.h>
 #include <linux/hwmon.h>
@@ -80,6 +81,27 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
        }
 }
 
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+                          void *data, uint32_t *size)
+{ /* Read one PM sensor value, dispatching to the SW SMU or powerplay backend. */
+       int ret = 0;
+
+       if (!data || !size) /* both output arguments are mandatory */
+               return -EINVAL;
+
+       if (is_support_sw_smu(adev)) /* software-SMU-capable ASIC: query the SMU directly */
+               ret = smu_read_sensor(&adev->smu, sensor, data, size);
+       else {
+               if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+                       ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+                                                                   sensor, data, size);
+               else
+                       ret = -EINVAL; /* no backend implements read_sensor */
+       }
+
+       return ret; /* 0 on success, -EINVAL on bad args/unsupported, else backend code */
+}
+
 /**
  * DOC: power_dpm_state
  *
@@ -122,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;
 
-       if (adev->powerplay.pp_funcs->get_current_power_state)
+       if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+               pm = amdgpu_smu_get_current_power_state(adev);
+       else if (adev->powerplay.pp_funcs->get_current_power_state)
                pm = amdgpu_dpm_get_current_power_state(adev);
        else
                pm = adev->pm.dpm.user_state;
@@ -240,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return snprintf(buf, PAGE_SIZE, "off\n");
 
-       if (adev->powerplay.pp_funcs->get_performance_level)
+       if (is_support_sw_smu(adev))
+               level = smu_get_performance_level(&adev->smu);
+       else if (adev->powerplay.pp_funcs->get_performance_level)
                level = amdgpu_dpm_get_performance_level(adev);
        else
                level = adev->pm.dpm.forced_level;
@@ -273,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (adev->powerplay.pp_funcs->get_performance_level)
+       if (is_support_sw_smu(adev))
+               current_level = smu_get_performance_level(&adev->smu);
+       else if (adev->powerplay.pp_funcs->get_performance_level)
                current_level = amdgpu_dpm_get_performance_level(adev);
 
        if (strncmp("low", buf, strlen("low")) == 0) {
@@ -299,10 +327,45 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                goto fail;
        }
 
+       if (amdgpu_sriov_vf(adev)) {
+               if (amdgim_is_hwperf(adev) &&
+                   adev->virt.ops->force_dpm_level) {
+                       mutex_lock(&adev->pm.mutex);
+                       adev->virt.ops->force_dpm_level(adev, level);
+                       mutex_unlock(&adev->pm.mutex);
+                       return count;
+               } else {
+                       return -EINVAL;
+               }
+       }
+
        if (current_level == level)
                return count;
 
-       if (adev->powerplay.pp_funcs->force_performance_level) {
+       /* profile_exit setting is valid only when current mode is in profile mode */
+       if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+           AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+           (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+               pr_err("Currently not in any profile mode!\n");
+               return -EINVAL;
+       }
+
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               if (adev->pm.dpm.thermal_active) {
+                       count = -EINVAL;
+                       mutex_unlock(&adev->pm.mutex);
+                       goto fail;
+               }
+               ret = smu_force_performance_level(&adev->smu, level);
+               if (ret)
+                       count = -EINVAL;
+               else
+                       adev->pm.dpm.forced_level = level;
+               mutex_unlock(&adev->pm.mutex);
+       } else if (adev->powerplay.pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        count = -EINVAL;
@@ -328,9 +391,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
-       int i, buf_len;
+       int i, buf_len, ret;
 
-       if (adev->powerplay.pp_funcs->get_pp_num_states)
+       if (is_support_sw_smu(adev)) {
+               ret = smu_get_power_num_states(&adev->smu, &data);
+               if (ret)
+                       return ret;
+       } else if (adev->powerplay.pp_funcs->get_pp_num_states)
                amdgpu_dpm_get_pp_num_states(adev, &data);
 
        buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -351,23 +418,29 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
+       struct smu_context *smu = &adev->smu;
        enum amd_pm_state_type pm = 0;
-       int i = 0;
+       int i = 0, ret = 0;
 
-       if (adev->powerplay.pp_funcs->get_current_power_state
+       if (is_support_sw_smu(adev)) {
+               pm = smu_get_current_power_state(smu);
+               ret = smu_get_power_num_states(smu, &data);
+               if (ret)
+                       return ret;
+       } else if (adev->powerplay.pp_funcs->get_current_power_state
                 && adev->powerplay.pp_funcs->get_pp_num_states) {
                pm = amdgpu_dpm_get_current_power_state(adev);
                amdgpu_dpm_get_pp_num_states(adev, &data);
+       }
 
-               for (i = 0; i < data.nums; i++) {
-                       if (pm == data.states[i])
-                               break;
-               }
-
-               if (i == data.nums)
-                       i = -EINVAL;
+       for (i = 0; i < data.nums; i++) {
+               if (pm == data.states[i])
+                       break;
        }
 
+       if (i == data.nums)
+               i = -EINVAL;
+
        return snprintf(buf, PAGE_SIZE, "%d\n", i);
 }
 
@@ -397,6 +470,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
+       else if (is_support_sw_smu(adev))
+               adev->pp_force_state_enabled = false;
        else if (adev->powerplay.pp_funcs->dispatch_tasks &&
                        adev->powerplay.pp_funcs->get_pp_num_states) {
                struct pp_states_info data;
@@ -442,7 +517,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        char *table = NULL;
        int size;
 
-       if (adev->powerplay.pp_funcs->get_pp_table)
+       if (is_support_sw_smu(adev)) {
+               size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+               if (size < 0)
+                       return size;
+       }
+       else if (adev->powerplay.pp_funcs->get_pp_table)
                size = amdgpu_dpm_get_pp_table(adev, &table);
        else
                return 0;
@@ -462,8 +542,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
+       int ret = 0;
 
-       if (adev->powerplay.pp_funcs->set_pp_table)
+       if (is_support_sw_smu(adev)) {
+               ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
+               if (ret)
+                       return ret;
+       } else if (adev->powerplay.pp_funcs->set_pp_table)
                amdgpu_dpm_set_pp_table(adev, buf, count);
 
        return count;
@@ -586,19 +671,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                        tmp_str++;
        }
 
-       if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
-               ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-                                               parameter, parameter_size);
+       if (is_support_sw_smu(adev)) {
+               ret = smu_od_edit_dpm_table(&adev->smu, type,
+                                           parameter, parameter_size);
 
-       if (ret)
-               return -EINVAL;
+               if (ret)
+                       return -EINVAL;
+       } else {
+               if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+                       ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+                                               parameter, parameter_size);
 
-       if (type == PP_OD_COMMIT_DPM_TABLE) {
-               if (adev->powerplay.pp_funcs->dispatch_tasks) {
-                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
-                       return count;
-               } else {
+               if (ret)
                        return -EINVAL;
+
+               if (type == PP_OD_COMMIT_DPM_TABLE) {
+                       if (adev->powerplay.pp_funcs->dispatch_tasks) {
+                               amdgpu_dpm_dispatch_task(adev,
+                                               AMD_PP_TASK_READJUST_POWER_STATE,
+                                               NULL);
+                               return count;
+                       } else {
+                               return -EINVAL;
+                       }
                }
        }
 
@@ -613,7 +708,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t size = 0;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels) {
+       if (is_support_sw_smu(adev)) {
+               size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
+               size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
+               size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
+               size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
+               return size;
+       } else if (adev->powerplay.pp_funcs->print_clock_levels) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
@@ -711,7 +812,13 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+           adev->virt.ops->get_pp_clk)
+               return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -767,7 +874,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
        if (ret)
@@ -783,7 +892,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -803,7 +914,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
        if (ret)
@@ -819,7 +932,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -839,7 +954,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
        if (ret)
@@ -855,7 +972,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -875,7 +994,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
        if (ret)
@@ -891,7 +1012,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -911,7 +1034,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
        if (ret)
@@ -927,7 +1052,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       if (is_support_sw_smu(adev))
+               return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+       else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
@@ -947,7 +1074,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
        if (ret)
                return ret;
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
+       if (is_support_sw_smu(adev))
+               ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+       else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
        if (ret)
@@ -964,7 +1093,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;
 
-       if (adev->powerplay.pp_funcs->get_sclk_od)
+       if (is_support_sw_smu(adev))
+               value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
+       else if (adev->powerplay.pp_funcs->get_sclk_od)
                value = amdgpu_dpm_get_sclk_od(adev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -986,14 +1117,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                count = -EINVAL;
                goto fail;
        }
-       if (adev->powerplay.pp_funcs->set_sclk_od)
-               amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
-               amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+       if (is_support_sw_smu(adev)) {
+               value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
        } else {
-               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-               amdgpu_pm_compute_clocks(adev);
+               if (adev->powerplay.pp_funcs->set_sclk_od)
+                       amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+
+               if (adev->powerplay.pp_funcs->dispatch_tasks) {
+                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+               } else {
+                       adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+                       amdgpu_pm_compute_clocks(adev);
+               }
        }
 
 fail:
@@ -1008,7 +1144,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;
 
-       if (adev->powerplay.pp_funcs->get_mclk_od)
+       if (is_support_sw_smu(adev))
+               value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
+       else if (adev->powerplay.pp_funcs->get_mclk_od)
                value = amdgpu_dpm_get_mclk_od(adev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -1030,14 +1168,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                count = -EINVAL;
                goto fail;
        }
-       if (adev->powerplay.pp_funcs->set_mclk_od)
-               amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
-               amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+       if (is_support_sw_smu(adev)) {
+               value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
        } else {
-               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-               amdgpu_pm_compute_clocks(adev);
+               if (adev->powerplay.pp_funcs->set_mclk_od)
+                       amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+
+               if (adev->powerplay.pp_funcs->dispatch_tasks) {
+                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+               } else {
+                       adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+                       amdgpu_pm_compute_clocks(adev);
+               }
        }
 
 fail:
@@ -1071,7 +1214,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (adev->powerplay.pp_funcs->get_power_profile_mode)
+       if (is_support_sw_smu(adev))
+               return smu_get_power_profile_mode(&adev->smu, buf);
+       else if (adev->powerplay.pp_funcs->get_power_profile_mode)
                return amdgpu_dpm_get_power_profile_mode(adev, buf);
 
        return snprintf(buf, PAGE_SIZE, "\n");
@@ -1121,9 +1266,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                }
        }
        parameter[parameter_size] = profile_mode;
-       if (adev->powerplay.pp_funcs->set_power_profile_mode)
+       if (is_support_sw_smu(adev))
+               ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+       else if (adev->powerplay.pp_funcs->set_power_profile_mode)
                ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
        if (!ret)
                return count;
 fail:
@@ -1146,14 +1292,10 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        int r, value, size = sizeof(value);
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-               return -EINVAL;
-
        /* read the IP busy sensor */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
                                   (void *)&value, &size);
+
        if (r)
                return r;
 
@@ -1247,11 +1389,6 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-               return -EINVAL;
-
        /* get the temperature */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
                                   (void *)&temp, &size);
@@ -1283,11 +1420,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;
+       if (is_support_sw_smu(adev)) {
+               pwm_mode = smu_get_fan_control_mode(&adev->smu);
+       } else {
+               if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+                       return -EINVAL;
 
-       if (!adev->powerplay.pp_funcs->get_fan_control_mode)
-               return -EINVAL;
-
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       }
 
        return sprintf(buf, "%i\n", pwm_mode);
 }
@@ -1306,14 +1446,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (!adev->powerplay.pp_funcs->set_fan_control_mode)
-               return -EINVAL;
+       if (is_support_sw_smu(adev)) {
+               err = kstrtoint(buf, 10, &value);
+               if (err)
+                       return err;
 
-       err = kstrtoint(buf, 10, &value);
-       if (err)
-               return err;
+               smu_set_fan_control_mode(&adev->smu, value);
+       } else {
+               if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+                       return -EINVAL;
 
-       amdgpu_dpm_set_fan_control_mode(adev, value);
+               err = kstrtoint(buf, 10, &value);
+               if (err)
+                       return err;
+
+               amdgpu_dpm_set_fan_control_mode(adev, value);
+       }
 
        return count;
 }
@@ -1345,8 +1493,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
        if  ((adev->flags & AMD_IS_PX) &&
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
-
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       if (is_support_sw_smu(adev))
+               pwm_mode = smu_get_fan_control_mode(&adev->smu);
+       else
+               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pr_info("manual fan speed control should be enabled first\n");
                return -EINVAL;
@@ -1358,7 +1508,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
        value = (value * 100) / 255;
 
-       if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+       if (is_support_sw_smu(adev)) {
+               err = smu_set_fan_speed_percent(&adev->smu, value);
+               if (err)
+                       return err;
+       } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
                err = amdgpu_dpm_set_fan_speed_percent(adev, value);
                if (err)
                        return err;
@@ -1380,7 +1534,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+       if (is_support_sw_smu(adev)) {
+               err = smu_get_fan_speed_percent(&adev->smu, &speed);
+               if (err)
+                       return err;
+       } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
                err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
                if (err)
                        return err;
@@ -1404,7 +1562,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+       if (is_support_sw_smu(adev)) {
+               err = smu_get_current_rpm(&adev->smu, &speed);
+               if (err)
+                       return err;
+       } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
                if (err)
                        return err;
@@ -1422,9 +1584,6 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
        u32 size = sizeof(min_rpm);
        int r;
 
-       if (!adev->powerplay.pp_funcs->read_sensor)
-               return -EINVAL;
-
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
                                   (void *)&min_rpm, &size);
        if (r)
@@ -1442,9 +1601,6 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
        u32 size = sizeof(max_rpm);
        int r;
 
-       if (!adev->powerplay.pp_funcs->read_sensor)
-               return -EINVAL;
-
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
                                   (void *)&max_rpm, &size);
        if (r)
@@ -1466,7 +1622,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+       if (is_support_sw_smu(adev)) {
+               err = smu_get_current_rpm(&adev->smu, &rpm);
+               if (err)
+                       return err;
+       } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
                if (err)
                        return err;
@@ -1484,7 +1644,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
        u32 value;
        u32 pwm_mode;
 
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       if (is_support_sw_smu(adev))
+               pwm_mode = smu_get_fan_control_mode(&adev->smu);
+       else
+               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
        if (pwm_mode != AMD_FAN_CTRL_MANUAL)
                return -ENODATA;
 
@@ -1497,7 +1661,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
        if (err)
                return err;
 
-       if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+       if (is_support_sw_smu(adev)) {
+               err = smu_set_fan_speed_rpm(&adev->smu, value);
+               if (err)
+                       return err;
+       } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
                err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
                if (err)
                        return err;
@@ -1513,11 +1681,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 pwm_mode = 0;
 
-       if (!adev->powerplay.pp_funcs->get_fan_control_mode)
-               return -EINVAL;
-
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       if (is_support_sw_smu(adev)) {
+               pwm_mode = smu_get_fan_control_mode(&adev->smu);
+       } else {
+               if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+                       return -EINVAL;
 
+               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       }
        return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
 
@@ -1536,8 +1707,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
             (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (!adev->powerplay.pp_funcs->set_fan_control_mode)
-               return -EINVAL;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -1550,7 +1719,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
        else
                return -EINVAL;
 
-       amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+       if (is_support_sw_smu(adev)) {
+               smu_set_fan_control_mode(&adev->smu, pwm_mode);
+       } else {
+               if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+                       return -EINVAL;
+               amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+       }
 
        return count;
 }
@@ -1569,11 +1744,6 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-             return -EINVAL;
-
        /* get the voltage */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
                                   (void *)&vddgfx, &size);
@@ -1608,11 +1778,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-             return -EINVAL;
-
        /* get the voltage */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
                                   (void *)&vddnb, &size);
@@ -1644,11 +1809,6 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-             return -EINVAL;
-
        /* get the power */
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
                                   (void *)&query, &size);
@@ -1675,7 +1835,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t limit = 0;
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+       if (is_support_sw_smu(adev)) {
+               smu_get_power_limit(&adev->smu, &limit, true);
+               return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+       } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
                adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
                return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else {
@@ -1690,7 +1853,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        uint32_t limit = 0;
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+       if (is_support_sw_smu(adev)) {
+               smu_get_power_limit(&adev->smu, &limit, false);
+               return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+       } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
                adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
                return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else {
@@ -1713,7 +1879,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                return err;
 
        value = value / 1000000; /* convert to Watt */
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+       if (is_support_sw_smu(adev)) {
+               adev->smu.funcs->set_power_limit(&adev->smu, value);
+       } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
                err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
                if (err)
                        return err;
@@ -1967,18 +2135,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;
 
-       /* mask fan attributes if we have no bindings for this asic to expose */
-       if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
-            attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
-           (!adev->powerplay.pp_funcs->get_fan_control_mode &&
-            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
-               effective_mode &= ~S_IRUGO;
+       if (!is_support_sw_smu(adev)) {
+               /* mask fan attributes if we have no bindings for this asic to expose */
+               if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+                    attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+                   (!adev->powerplay.pp_funcs->get_fan_control_mode &&
+                    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+                       effective_mode &= ~S_IRUGO;
 
-       if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-            attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
-           (!adev->powerplay.pp_funcs->set_fan_control_mode &&
-            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
-               effective_mode &= ~S_IWUSR;
+               if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+                    attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+                   (!adev->powerplay.pp_funcs->set_fan_control_mode &&
+                    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+                       effective_mode &= ~S_IWUSR;
+       }
 
        if ((adev->flags & AMD_IS_APU) &&
            (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
@@ -1987,20 +2157,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
                return 0;
 
-       /* hide max/min values if we can't both query and manage the fan */
-       if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-            !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
-            (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-            !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-           (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
-               return 0;
+       if (!is_support_sw_smu(adev)) {
+               /* hide max/min values if we can't both query and manage the fan */
+               if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+                    !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+                    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+                    !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+                   (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+                    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+                       return 0;
 
-       if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-            !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-           (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
-               return 0;
+               if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+                    !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+                   (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+                    attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+                       return 0;
+       }
 
        /* only APUs have vddnb */
        if (!(adev->flags & AMD_IS_APU) &&
@@ -2039,9 +2211,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
        if (!adev->pm.dpm_enabled)
                return;
 
-       if (adev->powerplay.pp_funcs &&
-           adev->powerplay.pp_funcs->read_sensor &&
-           !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
                                    (void *)&temp, &size)) {
                if (temp < adev->pm.dpm.thermal.min_temp)
                        /* switch back the user state */
@@ -2267,7 +2437,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+       int ret = 0;
+       if (is_support_sw_smu(adev)) {
+           ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+           if (ret)
+               DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
+                         enable ? "true" : "false", ret);
+       } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
                /* enable/disable UVD */
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
@@ -2288,7 +2464,13 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+       int ret = 0;
+       if (is_support_sw_smu(adev)) {
+           ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+           if (ret)
+               DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
+                         enable ? "true" : "false", ret);
+       } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
                /* enable/disable VCE */
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
@@ -2413,7 +2595,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                                "pp_power_profile_mode\n");
                return ret;
        }
-       if (hwmgr->od_enabled) {
+       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+           (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
                ret = device_create_file(adev->dev,
                                &dev_attr_pp_od_clk_voltage);
                if (ret) {
@@ -2489,7 +2672,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
        device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
        device_remove_file(adev->dev,
                        &dev_attr_pp_power_profile_mode);
-       if (hwmgr->od_enabled)
+       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+           (!is_support_sw_smu(adev) && hwmgr->od_enabled))
                device_remove_file(adev->dev,
                                &dev_attr_pp_od_clk_voltage);
        device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
@@ -2516,28 +2700,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                        amdgpu_fence_wait_empty(ring);
        }
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
-               if (!amdgpu_device_has_dc_support(adev)) {
+       if (is_support_sw_smu(adev)) {
+               struct smu_context *smu = &adev->smu;
+               struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+               mutex_lock(&(smu->mutex));
+               smu_handle_task(&adev->smu,
+                               smu_dpm->dpm_level,
+                               AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+               mutex_unlock(&(smu->mutex));
+       } else {
+               if (adev->powerplay.pp_funcs->dispatch_tasks) {
+                       if (!amdgpu_device_has_dc_support(adev)) {
+                               mutex_lock(&adev->pm.mutex);
+                               amdgpu_dpm_get_active_displays(adev);
+                               adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+                               adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+                               adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+                               /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+                               if (adev->pm.pm_display_cfg.vrefresh > 120)
+                                       adev->pm.pm_display_cfg.min_vblank_time = 0;
+                               if (adev->powerplay.pp_funcs->display_configuration_change)
+                                       adev->powerplay.pp_funcs->display_configuration_change(
+                                                                       adev->powerplay.pp_handle,
+                                                                       &adev->pm.pm_display_cfg);
+                               mutex_unlock(&adev->pm.mutex);
+                       }
+                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+               } else {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
-                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
-                       adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
-                       adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-                       /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
-                       if (adev->pm.pm_display_cfg.vrefresh > 120)
-                               adev->pm.pm_display_cfg.min_vblank_time = 0;
-                       if (adev->powerplay.pp_funcs->display_configuration_change)
-                               adev->powerplay.pp_funcs->display_configuration_change(
-                                                               adev->powerplay.pp_handle,
-                                                               &adev->pm.pm_display_cfg);
+                       amdgpu_dpm_change_power_state_locked(adev);
                        mutex_unlock(&adev->pm.mutex);
                }
-               amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
-       } else {
-               mutex_lock(&adev->pm.mutex);
-               amdgpu_dpm_get_active_displays(adev);
-               amdgpu_dpm_change_power_state_locked(adev);
-               mutex_unlock(&adev->pm.mutex);
        }
 }
 
@@ -2553,11 +2747,6 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
        uint32_t query = 0;
        int size;
 
-       /* sanity check PP is enabled */
-       if (!(adev->powerplay.pp_funcs &&
-             adev->powerplay.pp_funcs->read_sensor))
-             return -EINVAL;
-
        /* GPU Clocks */
        size = sizeof(value);
        seq_printf(m, "GFX Clocks and Power:\n");
@@ -2649,7 +2838,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
        if  ((adev->flags & AMD_IS_PX) &&
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
                seq_printf(m, "PX asic powered off\n");
-       } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+       } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
                        adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
This page took 0.065645 seconds and 4 git commands to generate.