tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
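The rewritten loops lean on strsep() returning NULL once the string is exhausted. With the old while (tmp_str[0]) form, an input that does not end in a delimiter leaves tmp_str set to NULL after the last token, and the next tmp_str[0] test dereferences that NULL pointer. A minimal userspace sketch of the new idiom (strtol() standing in for the kernel's kstrtol()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "300 600 900";
	char *tmp = buf, *sub_str;
	long parameter[8];
	int n = 0;

	/* strsep() hands out one token per call and returns NULL once tmp
	 * is exhausted, so the loop ends without dereferencing tmp again. */
	while ((sub_str = strsep(&tmp, " ")) != NULL) {
		if (!*sub_str)	/* consecutive delimiters yield empty tokens */
			continue;
		parameter[n++] = strtol(sub_str, NULL, 0);
	}

	for (int i = 0; i < n; i++)
		printf("%ld\n", parameter[i]);
	return 0;
}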
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
return -EINVAL;
}
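The rest of this helper is elided above; in context each parsed token selects one bit of a dpm level mask. A hedged userspace sketch of the whole parse (the helper name read_mask() and the *mask |= 1 << level step are assumptions about the elided code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the level-mask parse; the mask-building step is an assumption
 * about the code elided from the hunk above. */
static int read_mask(const char *buf, size_t count, unsigned int *mask)
{
	char buf_cpy[64];
	char *tmp, *sub_str;
	long level;

	if (count >= sizeof(buf_cpy))
		return -1;
	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = '\0';
	tmp = buf_cpy;
	*mask = 0;

	while ((sub_str = strsep(&tmp, " ")) != NULL) {
		if (!strlen(sub_str))
			break;
		level = strtol(sub_str, NULL, 0);
		if (level < 0 || level > 31)
			return -1;
		*mask |= 1u << level;	/* each index sets one level bit */
	}
	return 0;
}

int main(void)
{
	unsigned int mask;

	if (!read_mask("0 2 3", strlen("0 2 3"), &mask))
		printf("mask = 0x%x\n", mask);	/* prints mask = 0xd */
	return 0;
}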
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
}
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
}
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
else
ret = 0;
}
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
else
ret = 0;
}
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
else
ret = 0;
}
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
else
ret = 0;
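All six sysfs handlers drop the trailing bool in the same way, consistent with the locking having moved out of the callers and into smu_force_clk_levels() itself. A sketch of the callee-side pattern this implies (body paraphrased, not part of this patch):

int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	int ret = 0;

	/* The lock is now taken unconditionally here, so callers no
	 * longer pass a lock_needed flag. */
	mutex_lock(&smu->mutex);
	/* ... per-ASIC forcing of the requested clock levels ... */
	mutex_unlock(&smu->mutex);

	return ret;
}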
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}
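The underlying clock reading is an unsigned 32-bit value, and the scaled product can exceed INT_MAX, which %d would render as a negative number. A quick userspace illustration (the 10 kHz input unit is assumed for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sclk = 250000;			/* 2.5 GHz in 10 kHz units */
	uint32_t hz = sclk * 10 * 1000;		/* 2500000000 > INT_MAX */

	printf("%d\n", hz);	/* reinterpreted as signed: -1794967296 */
	printf("%u\n", hz);	/* the intended value: 2500000000 */
	return 0;
}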
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip crit temp on APU */
+ if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
+ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ if (adev->family == AMDGPU_FAMILY_SI) {
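+ /* SI has no SMU powergating hook for UVD; record the desired state
+ * under pm.mutex and let amdgpu_pm_compute_clocks() apply it. */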
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
}
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ if (adev->family == AMDGPU_FAMILY_SI) {
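+ /* As with UVD above: SI goes through the legacy dpm state rather
+ * than an SMU powergating call. */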
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+ }
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)