/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

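/*
 * amdgpu_pm_acpi_event_handler - handle ACPI AC/DC power source changes.
 * Caches the new AC power status and, if the asic provides an enable_bapm
 * callback, forwards the state so its power limits can be adjusted.
 */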
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

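/*
 * power_dpm_state (read): reports the currently requested DPM state as
 * "battery", "balanced" or "performance".  The file is created in the DRM
 * device's sysfs directory (typically /sys/class/drm/cardN/device/).
 */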
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

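/*
 * power_dpm_state (write): selects the DPM state; accepts "battery",
 * "balanced" or "performance", e.g. echo performance > power_dpm_state.
 */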
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

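/*
 * power_dpm_force_performance_level (read): reports the forced performance
 * level ("auto", "low", "high" or "manual"), or "off" while a PX dGPU is
 * powered down.
 */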
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

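/*
 * power_dpm_force_performance_level (write): forces the performance level.
 * "auto" lets DPM manage clocks, "low"/"high" pin the lowest/highest levels,
 * and "manual" allows finer control through the pp_dpm_* files.
 */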
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

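/*
 * pp_num_states (read-only): lists how many power states powerplay exposes
 * and a type label ("boot", "battery", "balanced", "performance") for each.
 */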
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}
		return snprintf(buf, PAGE_SIZE, "%d\n", i);
	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

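/*
 * pp_force_state (write): forces one of the states listed by pp_num_states,
 * selected by index; writing an empty line releases the forced state.
 */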
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else {
		ret = kstrtol(buf, 0, &idx);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		if (adev->pp_enabled) {
			struct pp_states_info data;

			amdgpu_dpm_get_pp_num_states(adev, &data);
			state = data.states[idx];
			/* only set user selected power states */
			if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
			    state != POWER_STATE_TYPE_DEFAULT) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
				adev->pp_force_state_enabled = true;
			}
		}
	}
fail:
	return count;
}

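/*
 * pp_table: raw powerplay table.  Reads dump the current table as hex bytes,
 * writes hand a replacement table to the powerplay core.
 */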
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, i;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	for (i = 0; i < size; i++) {
		sprintf(buf + i, "%02x", table[i]);
	}
	sprintf(buf + i, "\n");

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}

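/*
 * pp_dpm_sclk / pp_dpm_mclk / pp_dpm_pcie: reads list the available engine
 * clock, memory clock and PCIe levels; writing a level index forces that
 * level through the powerplay core.
 */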
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long level;
	int ret;

	ret = kstrtol(buf, 0, &level);
	if (ret)
		return -EINVAL;

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);

	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long level;
	int ret;

	ret = kstrtol(buf, 0, &level);
	if (ret)
		return -EINVAL;

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);

	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long level;
	int ret;

	ret = kstrtol(buf, 0, &level);
	if (ret)
		return -EINVAL;

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);

	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);

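/*
 * hwmon interface: exposes GPU temperature (temp1_input/crit/crit_hyst) and
 * fan control (pwm1, pwm1_enable, pwm1_min, pwm1_max) through a standard
 * hwmon device registered in amdgpu_pm_sysfs_init().
 */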
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

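/*
 * Decides which hwmon attributes are visible on this asic: limit and fan
 * files are hidden when DPM is disabled or no fan is present, and read or
 * write permission is masked when the matching fan callbacks are missing.
 */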
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

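/*
 * Thermal worker scheduled from the thermal interrupt: switches to the
 * internal thermal state while the GPU is too hot and back to the user
 * state once the temperature drops again.
 */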
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

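/*
 * Walks the power state array parsed from the vbios and returns the state
 * that best matches the requested type, falling back to progressively more
 * generic states when no exact match is found.
 */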
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

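/*
 * Core legacy DPM state switch, called with pm.mutex held: picks the target
 * state, bails out early when nothing changed, otherwise updates display
 * watermarks, waits for the rings to drain and programs the new state.
 */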
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

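/*
 * UVD/VCE power management: with powerplay or a powergate callback the block
 * is gated directly; otherwise the legacy path selects a suitable power
 * state and recomputes clocks.
 */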
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

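/*
 * Registers the hwmon device and creates the power_dpm_* (plus, with
 * powerplay, the pp_*) sysfs files, then sets up the debugfs entry.
 */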
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_sclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_mclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
			return ret;
		}
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	}
}

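/*
 * amdgpu_pm_compute_clocks - re-evaluate the power state after a display or
 * load change.  With powerplay this forwards a display-config event; on the
 * legacy path it recounts active crtcs, refreshes the AC status and switches
 * states under pm.mutex.
 */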
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

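/*
 * debugfs: amdgpu_pm_info reports the current DPM performance level
 * information when CONFIG_DEBUG_FS is enabled.
 */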
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}