X-Git-Url: https://repo.jachan.dev/linux.git/blobdiff_plain/ad88f0517b239a08b148fbd300de103e8f7a78ef..bcd1739788e2ea111d0d2efe1ed6633d9f6a20da:/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7fe0330ca319..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -144,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (adev->powerplay.pp_funcs->get_current_power_state)
+	if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+		pm = amdgpu_smu_get_current_power_state(adev);
+	else if (adev->powerplay.pp_funcs->get_current_power_state)
 		pm = amdgpu_dpm_get_current_power_state(adev);
 	else
 		pm = adev->pm.dpm.user_state;
@@ -262,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return snprintf(buf, PAGE_SIZE, "off\n");
 
-	if (adev->powerplay.pp_funcs->get_performance_level)
+	if (is_support_sw_smu(adev))
+		level = smu_get_performance_level(&adev->smu);
+	else if (adev->powerplay.pp_funcs->get_performance_level)
 		level = amdgpu_dpm_get_performance_level(adev);
 	else
 		level = adev->pm.dpm.forced_level;
@@ -295,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (adev->powerplay.pp_funcs->get_performance_level)
+	if (is_support_sw_smu(adev))
+		current_level = smu_get_performance_level(&adev->smu);
+	else if (adev->powerplay.pp_funcs->get_performance_level)
 		current_level = amdgpu_dpm_get_performance_level(adev);
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
@@ -321,10 +327,45 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		goto fail;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		if (amdgim_is_hwperf(adev) &&
+		    adev->virt.ops->force_dpm_level) {
+			mutex_lock(&adev->pm.mutex);
+			adev->virt.ops->force_dpm_level(adev, level);
+			mutex_unlock(&adev->pm.mutex);
+			return count;
+		} else {
+			return -EINVAL;
+		}
+	}
+
 	if (current_level == level)
 		return count;
 
-	if (adev->powerplay.pp_funcs->force_performance_level) {
+	/* profile_exit setting is valid only when current mode is in profile mode */
+	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+		pr_err("Currently not in any profile mode!\n");
+		return -EINVAL;
+	}
+
+	if (is_support_sw_smu(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		if (adev->pm.dpm.thermal_active) {
+			count = -EINVAL;
+			mutex_unlock(&adev->pm.mutex);
+			goto fail;
+		}
+		ret = smu_force_performance_level(&adev->smu, level);
+		if (ret)
+			count = -EINVAL;
+		else
+			adev->pm.dpm.forced_level = level;
+		mutex_unlock(&adev->pm.mutex);
+	} else if (adev->powerplay.pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
 			count = -EINVAL;
@@ -630,19 +671,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		tmp_str++;
 	}
 
-	if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
-		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-						parameter, parameter_size);
+	if (is_support_sw_smu(adev)) {
+		ret = smu_od_edit_dpm_table(&adev->smu, type,
+					    parameter, parameter_size);
 
-	if (ret)
-		return -EINVAL;
+		if (ret)
+			return -EINVAL;
+	} else {
+		if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+						parameter, parameter_size);
 
-	if (type == PP_OD_COMMIT_DPM_TABLE) {
-		if (adev->powerplay.pp_funcs->dispatch_tasks) {
-			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
-			return count;
-		} else {
+		if (ret)
 			return -EINVAL;
+
+		if (type == PP_OD_COMMIT_DPM_TABLE) {
+			if (adev->powerplay.pp_funcs->dispatch_tasks) {
+				amdgpu_dpm_dispatch_task(adev,
+						AMD_PP_TASK_READJUST_POWER_STATE,
+						NULL);
+				return count;
+			} else {
+				return -EINVAL;
+			}
 		}
 	}
@@ -761,6 +812,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+	    adev->virt.ops->get_pp_clk)
+		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
 	if (is_support_sw_smu(adev))
 		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
@@ -877,7 +932,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->powerplay.pp_funcs->print_clock_levels)
+	if (is_support_sw_smu(adev))
+		return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
 	else
 		return snprintf(buf, PAGE_SIZE, "\n");
@@ -897,7 +954,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (adev->powerplay.pp_funcs->force_clock_level)
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
 	if (ret)
@@ -913,7 +972,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->powerplay.pp_funcs->print_clock_levels)
+	if (is_support_sw_smu(adev))
+		return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
 	else
 		return snprintf(buf, PAGE_SIZE, "\n");
@@ -933,7 +994,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (adev->powerplay.pp_funcs->force_clock_level)
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
 	if (ret)
@@ -949,7 +1012,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->powerplay.pp_funcs->print_clock_levels)
+	if (is_support_sw_smu(adev))
+		return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
 	else
 		return snprintf(buf, PAGE_SIZE, "\n");
@@ -969,7 +1034,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (adev->powerplay.pp_funcs->force_clock_level)
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
 	if (ret)
@@ -985,7 +1052,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->powerplay.pp_funcs->print_clock_levels)
+	if (is_support_sw_smu(adev))
+		return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
 	else
 		return snprintf(buf, PAGE_SIZE, "\n");
@@ -1005,7 +1074,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (adev->powerplay.pp_funcs->force_clock_level)
+	if (is_support_sw_smu(adev))
+		ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
 	if (ret)
@@ -1046,14 +1117,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
-	if (adev->powerplay.pp_funcs->set_sclk_od)
-		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
-	if (adev->powerplay.pp_funcs->dispatch_tasks) {
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+	if (is_support_sw_smu(adev)) {
+		value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
 	} else {
-		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-		amdgpu_pm_compute_clocks(adev);
+		if (adev->powerplay.pp_funcs->set_sclk_od)
+			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+
+		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+		} else {
+			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+			amdgpu_pm_compute_clocks(adev);
+		}
 	}
 
 fail:
@@ -1092,14 +1168,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
-	if (adev->powerplay.pp_funcs->set_mclk_od)
-		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
-	if (adev->powerplay.pp_funcs->dispatch_tasks) {
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+	if (is_support_sw_smu(adev)) {
+		value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
 	} else {
-		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-		amdgpu_pm_compute_clocks(adev);
+		if (adev->powerplay.pp_funcs->set_mclk_od)
+			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+
+		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+		} else {
+			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+			amdgpu_pm_compute_clocks(adev);
+		}
 	}
 
 fail:
@@ -1133,7 +1214,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->powerplay.pp_funcs->get_power_profile_mode)
+	if (is_support_sw_smu(adev))
+		return smu_get_power_profile_mode(&adev->smu, buf);
+	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
 		return amdgpu_dpm_get_power_profile_mode(adev, buf);
 
 	return snprintf(buf, PAGE_SIZE, "\n");
@@ -1183,9 +1266,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		}
 	}
 	parameter[parameter_size] = profile_mode;
-	if (adev->powerplay.pp_funcs->set_power_profile_mode)
+	if (is_support_sw_smu(adev))
+		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
 	if (!ret)
 		return count;
 fail:
@@ -1336,11 +1420,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
+	if (is_support_sw_smu(adev)) {
+		pwm_mode = smu_get_fan_control_mode(&adev->smu);
+	} else {
+		if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+			return -EINVAL;
 
-	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
-		return -EINVAL;
-
-	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	}
 
 	return sprintf(buf, "%i\n", pwm_mode);
 }
@@ -1359,14 +1446,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
-		return -EINVAL;
+	if (is_support_sw_smu(adev)) {
+		err = kstrtoint(buf, 10, &value);
+		if (err)
+			return err;
 
-	err = kstrtoint(buf, 10, &value);
-	if (err)
-		return err;
+		smu_set_fan_control_mode(&adev->smu, value);
+	} else {
+		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+			return -EINVAL;
 
-	amdgpu_dpm_set_fan_control_mode(adev, value);
+		err = kstrtoint(buf, 10, &value);
+		if (err)
+			return err;
+
+		amdgpu_dpm_set_fan_control_mode(adev, value);
+	}
 
 	return count;
 }
@@ -1398,8 +1493,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	if ((adev->flags & AMD_IS_PX) &&
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
-
-	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (is_support_sw_smu(adev))
+		pwm_mode = smu_get_fan_control_mode(&adev->smu);
+	else
+		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
 		pr_info("manual fan speed control should be enabled first\n");
 		return -EINVAL;
@@ -1411,7 +1508,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	value = (value * 100) / 255;
 
-	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+	if (is_support_sw_smu(adev)) {
+		err = smu_set_fan_speed_percent(&adev->smu, value);
+		if (err)
+			return err;
+	} else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
 		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
 		if (err)
 			return err;
@@ -1433,7 +1534,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+	if (is_support_sw_smu(adev)) {
+		err = smu_get_fan_speed_percent(&adev->smu, &speed);
+		if (err)
+			return err;
+	} else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
 		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
 		if (err)
 			return err;
@@ -1457,7 +1562,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+	if (is_support_sw_smu(adev)) {
+		err = smu_get_current_rpm(&adev->smu, &speed);
+		if (err)
+			return err;
+	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
 		if (err)
 			return err;
@@ -1513,7 +1622,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+	if (is_support_sw_smu(adev)) {
+		err = smu_get_current_rpm(&adev->smu, &rpm);
+		if (err)
+			return err;
+	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
 		if (err)
 			return err;
@@ -1531,7 +1644,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
-	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (is_support_sw_smu(adev))
+		pwm_mode = smu_get_fan_control_mode(&adev->smu);
+	else
+		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
 	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
 		return -ENODATA;
 
@@ -1544,7 +1661,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	if (err)
 		return err;
 
-	if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+	if (is_support_sw_smu(adev)) {
+		err = smu_set_fan_speed_rpm(&adev->smu, value);
+		if (err)
+			return err;
+	} else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
 		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
 		if (err)
 			return err;
@@ -1560,11 +1681,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
 
-	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
-		return -EINVAL;
-
-	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (is_support_sw_smu(adev)) {
+		pwm_mode = smu_get_fan_control_mode(&adev->smu);
+	} else {
+		if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+			return -EINVAL;
 
+		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	}
 	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
@@ -1583,8 +1707,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
-		return -EINVAL;
 
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -1597,7 +1719,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	else
 		return -EINVAL;
 
-	amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+	if (is_support_sw_smu(adev)) {
+		smu_set_fan_control_mode(&adev->smu, pwm_mode);
+	} else {
+		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+			return -EINVAL;
+		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+	}
 
 	return count;
 }
@@ -1707,7 +1835,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	uint32_t limit = 0;
 
-	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+	if (is_support_sw_smu(adev)) {
+		smu_get_power_limit(&adev->smu, &limit, true);
+		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else {
@@ -1722,7 +1853,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	uint32_t limit = 0;
 
-	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+	if (is_support_sw_smu(adev)) {
+		smu_get_power_limit(&adev->smu, &limit, false);
+		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else {
@@ -1745,7 +1879,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 		return err;
 
 	value = value / 1000000; /* convert to Watt */
-	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+	if (is_support_sw_smu(adev)) {
+		adev->smu.funcs->set_power_limit(&adev->smu, value);
+	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
 		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
 		if (err)
 			return err;
@@ -1999,18 +2135,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;
 
-	/* mask fan attributes if we have no bindings for this asic to expose */
-	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
-	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
-	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
-	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
-		effective_mode &= ~S_IRUGO;
+	if (!is_support_sw_smu(adev)) {
+		/* mask fan attributes if we have no bindings for this asic to expose */
+		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
+		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+			effective_mode &= ~S_IRUGO;
 
-	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
-	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
-	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
-		effective_mode &= ~S_IWUSR;
+		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
+		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+			effective_mode &= ~S_IWUSR;
+	}
 
 	if ((adev->flags & AMD_IS_APU) &&
 	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
@@ -2019,20 +2157,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
 
-	/* hide max/min values if we can't both query and manage the fan */
-	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
-	    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
-		return 0;
+	if (!is_support_sw_smu(adev)) {
+		/* hide max/min values if we can't both query and manage the fan */
+		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+			return 0;
 
-	if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
-	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
-		return 0;
+		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+			return 0;
+	}
 
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU) &&
@@ -2297,7 +2437,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+	int ret = 0;
+	if (is_support_sw_smu(adev)) {
+		ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+		if (ret)
+			DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
+				  enable ? "true" : "false", ret);
+	} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
 		/* enable/disable UVD */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
@@ -2318,7 +2464,13 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+	int ret = 0;
+	if (is_support_sw_smu(adev)) {
+		ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+		if (ret)
+			DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
+				  enable ? "true" : "false", ret);
+	} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
 		/* enable/disable VCE */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
@@ -2443,7 +2595,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 				"pp_power_profile_mode\n");
 		return ret;
 	}
-	if (is_support_sw_smu(adev) || hwmgr->od_enabled) {
+	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+	    (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
 		ret = device_create_file(adev->dev,
 				&dev_attr_pp_od_clk_voltage);
 		if (ret) {
@@ -2519,7 +2672,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
 	device_remove_file(adev->dev,
 			&dev_attr_pp_power_profile_mode);
-	if (hwmgr->od_enabled)
+	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
 		device_remove_file(adev->dev,
 				&dev_attr_pp_od_clk_voltage);
 	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
@@ -2546,28 +2700,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 		amdgpu_fence_wait_empty(ring);
 	}
 
-	if (adev->powerplay.pp_funcs->dispatch_tasks) {
-		if (!amdgpu_device_has_dc_support(adev)) {
+	if (is_support_sw_smu(adev)) {
+		struct smu_context *smu = &adev->smu;
+		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+		mutex_lock(&(smu->mutex));
+		smu_handle_task(&adev->smu,
+				smu_dpm->dpm_level,
+				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+		mutex_unlock(&(smu->mutex));
+	} else {
+		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+			if (!amdgpu_device_has_dc_support(adev)) {
+				mutex_lock(&adev->pm.mutex);
+				amdgpu_dpm_get_active_displays(adev);
+				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+				if (adev->pm.pm_display_cfg.vrefresh > 120)
+					adev->pm.pm_display_cfg.min_vblank_time = 0;
+				if (adev->powerplay.pp_funcs->display_configuration_change)
+					adev->powerplay.pp_funcs->display_configuration_change(
+							adev->powerplay.pp_handle,
+							&adev->pm.pm_display_cfg);
+				mutex_unlock(&adev->pm.mutex);
+			}
+			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+		} else {
 			mutex_lock(&adev->pm.mutex);
 			amdgpu_dpm_get_active_displays(adev);
-			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
-			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
-			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-			/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
-			if (adev->pm.pm_display_cfg.vrefresh > 120)
-				adev->pm.pm_display_cfg.min_vblank_time = 0;
-			if (adev->powerplay.pp_funcs->display_configuration_change)
-				adev->powerplay.pp_funcs->display_configuration_change(
-							adev->powerplay.pp_handle,
-							&adev->pm.pm_display_cfg);
+			amdgpu_dpm_change_power_state_locked(adev);
 			mutex_unlock(&adev->pm.mutex);
 		}
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
-	} else {
-		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_get_active_displays(adev);
-		amdgpu_dpm_change_power_state_locked(adev);
-		mutex_unlock(&adev->pm.mutex);
 	}
 }
@@ -2674,7 +2838,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	if  ((adev->flags & AMD_IS_PX) &&
 	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 		seq_printf(m, "PX asic powered off\n");
-	} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+	} else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
 			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
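---

Editor's note: every hunk above applies the same dispatch idiom -- try the new
SW SMU backend first (is_support_sw_smu()), fall back to the legacy powerplay
callback when that hook is present, and only then to cached dpm state. The
standalone sketch below illustrates that idiom outside the kernel; every type
and helper in it is an invented stand-in for illustration, not an amdgpu API.

/* dispatch_sketch.c -- build with: cc dispatch_sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct device_ctx {
	bool has_sw_smu;            /* stand-in for is_support_sw_smu(adev) */
	int (*pp_get_level)(void);  /* stand-in for a powerplay pp_funcs hook */
	int cached_level;           /* stand-in for adev->pm.dpm.forced_level */
};

static int smu_get_level(void) { return 1; } /* stand-in for the SMU path */
static int pp_get_level(void)  { return 2; } /* stand-in for the pp path */

static int get_performance_level(struct device_ctx *dev)
{
	if (dev->has_sw_smu)        /* new SW SMU path is preferred */
		return smu_get_level();
	else if (dev->pp_get_level) /* legacy powerplay path, if wired up */
		return dev->pp_get_level();
	return dev->cached_level;   /* last resort: cached driver state */
}

int main(void)
{
	struct device_ctx dev = { .has_sw_smu = false,
				  .pp_get_level = pp_get_level,
				  .cached_level = 0 };
	printf("level = %d\n", get_performance_level(&dev)); /* prints 2 */
	dev.has_sw_smu = true;
	printf("level = %d\n", get_performance_level(&dev)); /* prints 1 */
	return 0;
}

The three-step fallback keeps the sysfs/hwmon interfaces identical from
userspace while the driver migrates backends per ASIC, which is why the
diff repeats the same if/else-if shape in each accessor.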