2 * Copyright 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <asm/processor.h>
38 #define MAX_NUM_OF_FEATURES_PER_SUBSET 8
39 #define MAX_NUM_OF_SUBSETS 8
41 #define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)
44 struct kobj_attribute attribute;
45 struct list_head entry;
50 struct list_head entry;
51 struct list_head attribute;
55 struct od_feature_ops {
56 umode_t (*is_visible)(struct amdgpu_device *adev);
57 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
59 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
60 const char *buf, size_t count);
63 struct od_feature_item {
65 struct od_feature_ops ops;
68 struct od_feature_container {
70 struct od_feature_ops ops;
71 struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
74 struct od_feature_set {
75 struct od_feature_container containers[MAX_NUM_OF_SUBSETS];
78 static const struct hwmon_temp_label {
79 enum PP_HWMON_TEMP channel;
82 {PP_TEMP_EDGE, "edge"},
83 {PP_TEMP_JUNCTION, "junction"},
87 const char * const amdgpu_pp_profile_name[] = {
101 * DOC: power_dpm_state
103 * The power_dpm_state file is a legacy interface and is only provided for
104 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
105 * certain power related parameters. The file power_dpm_state is used for this.
106 * It accepts the following arguments:
116 * On older GPUs, the vbios provided a special power state for battery
117 * operation. Selecting battery switched to this state. This is no
118 * longer provided on newer GPUs so the option does nothing in that case.
122 * On older GPUs, the vbios provided a special power state for balanced
123 * operation. Selecting balanced switched to this state. This is no
124 * longer provided on newer GPUs so the option does nothing in that case.
128 * On older GPUs, the vbios provided a special power state for performance
129 * operation. Selecting performance switched to this state. This is no
130 * longer provided on newer GPUs so the option does nothing in that case.
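*
* As an illustrative example (the sysfs path is abbreviated here), the
* current state can be read back and the battery state selected with:
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../power_dpm_state
*	performance
*	echo battery > /sys/bus/pci/devices/.../power_dpm_state
*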
134 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
135 struct device_attribute *attr,
138 struct drm_device *ddev = dev_get_drvdata(dev);
139 struct amdgpu_device *adev = drm_to_adev(ddev);
140 enum amd_pm_state_type pm;
143 if (amdgpu_in_reset(adev))
145 if (adev->in_suspend && !adev->in_runpm)
148 ret = pm_runtime_get_if_active(ddev->dev);
150 return ret ?: -EPERM;
152 amdgpu_dpm_get_current_power_state(adev, &pm);
154 pm_runtime_put_autosuspend(ddev->dev);
156 return sysfs_emit(buf, "%s\n",
157 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
158 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
161 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
162 struct device_attribute *attr,
166 struct drm_device *ddev = dev_get_drvdata(dev);
167 struct amdgpu_device *adev = drm_to_adev(ddev);
168 enum amd_pm_state_type state;
171 if (amdgpu_in_reset(adev))
173 if (adev->in_suspend && !adev->in_runpm)
176 if (strncmp("battery", buf, strlen("battery")) == 0)
177 state = POWER_STATE_TYPE_BATTERY;
178 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
179 state = POWER_STATE_TYPE_BALANCED;
180 else if (strncmp("performance", buf, strlen("performance")) == 0)
181 state = POWER_STATE_TYPE_PERFORMANCE;
185 ret = pm_runtime_resume_and_get(ddev->dev);
189 amdgpu_dpm_set_power_state(adev, state);
191 pm_runtime_mark_last_busy(ddev->dev);
192 pm_runtime_put_autosuspend(ddev->dev);
199 * DOC: power_dpm_force_performance_level
201 * The amdgpu driver provides a sysfs API for adjusting certain power
202 * related parameters. The file power_dpm_force_performance_level is
203 * used for this. It accepts the following arguments:
223 * When auto is selected, the driver will attempt to dynamically select
224 * the optimal power profile for current conditions in the driver.
228 * When low is selected, the clocks are forced to the lowest power state.
232 * When high is selected, the clocks are forced to the highest power state.
236 * When manual is selected, the user can manually adjust which power states
237 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
238 * and pp_dpm_pcie files and adjust the power state transition heuristics
239 * via the pp_power_profile_mode sysfs file.
246 * When the profiling modes are selected, clock and power gating are
247 * disabled and the clocks are set for different profiling cases. This
249 * mode is recommended for profiling specific workloads where you do
250 * not want clock or power gating, or clock fluctuation, to interfere
250 * with your results. profile_standard sets the clocks to a fixed clock
251 * level which varies from asic to asic. profile_min_sclk forces the sclk
252 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
253 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
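*
* For example (path abbreviated), the peak profiling mode can be forced and
* automatic selection restored afterwards with:
*
* .. code-block:: console
*
*	echo profile_peak > /sys/bus/pci/devices/.../power_dpm_force_performance_level
*	echo auto > /sys/bus/pci/devices/.../power_dpm_force_performance_level
*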
257 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
258 struct device_attribute *attr,
261 struct drm_device *ddev = dev_get_drvdata(dev);
262 struct amdgpu_device *adev = drm_to_adev(ddev);
263 enum amd_dpm_forced_level level = 0xff;
266 if (amdgpu_in_reset(adev))
268 if (adev->in_suspend && !adev->in_runpm)
271 ret = pm_runtime_get_if_active(ddev->dev);
273 return ret ?: -EPERM;
275 level = amdgpu_dpm_get_performance_level(adev);
277 pm_runtime_put_autosuspend(ddev->dev);
279 return sysfs_emit(buf, "%s\n",
280 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
281 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
282 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
283 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
284 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
285 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
286 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
287 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
288 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
292 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
293 struct device_attribute *attr,
297 struct drm_device *ddev = dev_get_drvdata(dev);
298 struct amdgpu_device *adev = drm_to_adev(ddev);
299 enum amd_dpm_forced_level level;
302 if (amdgpu_in_reset(adev))
304 if (adev->in_suspend && !adev->in_runpm)
307 if (strncmp("low", buf, strlen("low")) == 0) {
308 level = AMD_DPM_FORCED_LEVEL_LOW;
309 } else if (strncmp("high", buf, strlen("high")) == 0) {
310 level = AMD_DPM_FORCED_LEVEL_HIGH;
311 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
312 level = AMD_DPM_FORCED_LEVEL_AUTO;
313 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
314 level = AMD_DPM_FORCED_LEVEL_MANUAL;
315 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
316 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
317 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
318 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
319 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
320 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
321 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
322 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
323 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
324 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
325 } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
326 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
331 ret = pm_runtime_resume_and_get(ddev->dev);
335 mutex_lock(&adev->pm.stable_pstate_ctx_lock);
336 if (amdgpu_dpm_force_performance_level(adev, level)) {
337 pm_runtime_mark_last_busy(ddev->dev);
338 pm_runtime_put_autosuspend(ddev->dev);
339 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
342 /* override whatever a user ctx may have set */
343 adev->pm.stable_pstate_ctx = NULL;
344 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
346 pm_runtime_mark_last_busy(ddev->dev);
347 pm_runtime_put_autosuspend(ddev->dev);
352 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
353 struct device_attribute *attr,
356 struct drm_device *ddev = dev_get_drvdata(dev);
357 struct amdgpu_device *adev = drm_to_adev(ddev);
358 struct pp_states_info data;
362 if (amdgpu_in_reset(adev))
364 if (adev->in_suspend && !adev->in_runpm)
367 ret = pm_runtime_get_if_active(ddev->dev);
369 return ret ?: -EPERM;
371 if (amdgpu_dpm_get_pp_num_states(adev, &data))
372 memset(&data, 0, sizeof(data));
374 pm_runtime_put_autosuspend(ddev->dev);
376 buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
377 for (i = 0; i < data.nums; i++)
378 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
379 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
380 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
381 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
382 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
387 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
388 struct device_attribute *attr,
391 struct drm_device *ddev = dev_get_drvdata(dev);
392 struct amdgpu_device *adev = drm_to_adev(ddev);
393 struct pp_states_info data = {0};
394 enum amd_pm_state_type pm = 0;
397 if (amdgpu_in_reset(adev))
399 if (adev->in_suspend && !adev->in_runpm)
402 ret = pm_runtime_get_if_active(ddev->dev);
404 return ret ?: -EPERM;
406 amdgpu_dpm_get_current_power_state(adev, &pm);
408 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
410 pm_runtime_put_autosuspend(ddev->dev);
415 for (i = 0; i < data.nums; i++) {
416 if (pm == data.states[i])
423 return sysfs_emit(buf, "%d\n", i);
426 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
427 struct device_attribute *attr,
430 struct drm_device *ddev = dev_get_drvdata(dev);
431 struct amdgpu_device *adev = drm_to_adev(ddev);
433 if (amdgpu_in_reset(adev))
435 if (adev->in_suspend && !adev->in_runpm)
438 if (adev->pm.pp_force_state_enabled)
439 return amdgpu_get_pp_cur_state(dev, attr, buf);
441 return sysfs_emit(buf, "\n");
444 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
445 struct device_attribute *attr,
449 struct drm_device *ddev = dev_get_drvdata(dev);
450 struct amdgpu_device *adev = drm_to_adev(ddev);
451 enum amd_pm_state_type state = 0;
452 struct pp_states_info data;
456 if (amdgpu_in_reset(adev))
458 if (adev->in_suspend && !adev->in_runpm)
461 adev->pm.pp_force_state_enabled = false;
463 if (strlen(buf) == 1)
466 ret = kstrtoul(buf, 0, &idx);
467 if (ret || idx >= ARRAY_SIZE(data.states))
470 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
472 ret = pm_runtime_resume_and_get(ddev->dev);
476 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
480 state = data.states[idx];
482 /* only set user selected power states */
483 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
484 state != POWER_STATE_TYPE_DEFAULT) {
485 ret = amdgpu_dpm_dispatch_task(adev,
486 AMD_PP_TASK_ENABLE_USER_STATE, &state);
490 adev->pm.pp_force_state_enabled = true;
493 pm_runtime_mark_last_busy(ddev->dev);
494 pm_runtime_put_autosuspend(ddev->dev);
499 pm_runtime_mark_last_busy(ddev->dev);
500 pm_runtime_put_autosuspend(ddev->dev);
507 * The amdgpu driver provides a sysfs API for uploading new powerplay
508 * tables. The file pp_table is used for this. Reading the file
509 * will dump the current power play table. Writing to the file
510 * will attempt to upload a new powerplay table and re-initialize
511 * powerplay using that new table.
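*
* As a sketch (the path is abbreviated and the file names are illustrative),
* the current table can be saved and a modified copy uploaded with:
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../pp_table > /tmp/pp_table.bin
*	cat /tmp/pp_table.modified > /sys/bus/pci/devices/.../pp_table
*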
515 static ssize_t amdgpu_get_pp_table(struct device *dev,
516 struct device_attribute *attr,
519 struct drm_device *ddev = dev_get_drvdata(dev);
520 struct amdgpu_device *adev = drm_to_adev(ddev);
524 if (amdgpu_in_reset(adev))
526 if (adev->in_suspend && !adev->in_runpm)
529 ret = pm_runtime_get_if_active(ddev->dev);
531 return ret ?: -EPERM;
533 size = amdgpu_dpm_get_pp_table(adev, &table);
535 pm_runtime_put_autosuspend(ddev->dev);
540 if (size >= PAGE_SIZE)
541 size = PAGE_SIZE - 1;
543 memcpy(buf, table, size);
548 static ssize_t amdgpu_set_pp_table(struct device *dev,
549 struct device_attribute *attr,
553 struct drm_device *ddev = dev_get_drvdata(dev);
554 struct amdgpu_device *adev = drm_to_adev(ddev);
557 if (amdgpu_in_reset(adev))
559 if (adev->in_suspend && !adev->in_runpm)
562 ret = pm_runtime_resume_and_get(ddev->dev);
566 ret = amdgpu_dpm_set_pp_table(adev, buf, count);
568 pm_runtime_mark_last_busy(ddev->dev);
569 pm_runtime_put_autosuspend(ddev->dev);
578 * DOC: pp_od_clk_voltage
580 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
581 * in each power level within a power state. The file pp_od_clk_voltage is used for this.
584 * Note that the actual memory controller clock rate is exposed, not
585 * the effective memory clock of the DRAMs. To translate it, use the following formula:
588 * Clock conversion (MHz):
590 * HBM: effective_memory_clock = memory_controller_clock * 1
592 * G5: effective_memory_clock = memory_controller_clock * 1
594 * G6: effective_memory_clock = memory_controller_clock * 2
596 * DRAM data rate (MT/s):
598 * HBM: effective_memory_clock * 2 = data_rate
600 * G5: effective_memory_clock * 4 = data_rate
602 * G6: effective_memory_clock * 8 = data_rate
606 * data_rate * vram_bit_width / 8 = memory_bandwidth
612 * memory_controller_clock = 1750 MHz
614 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
616 * data rate = 1750 * 4 = 7000 MT/s
618 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
622 * memory_controller_clock = 875 MHz
624 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
626 * data rate = 1750 * 8 = 14000 MT/s
628 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
630 * < For Vega10 and previous ASICs >
632 * Reading the file will display:
634 * - a list of engine clock levels and voltages labeled OD_SCLK
636 * - a list of memory clock levels and voltages labeled OD_MCLK
638 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
640 * To manually adjust these settings, first select manual using
641 * power_dpm_force_performance_level. Enter a new value for each
642 * level by writing a string that contains "s/m level clock voltage" to
643 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
644 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
645 * 810 mV. When you have edited all of the states as needed, write
646 * "c" (commit) to the file to commit your changes. If you want to reset to the
647 * default power levels, write "r" (reset) to the file to reset them.
650 * < For Vega20 and newer ASICs >
652 * Reading the file will display:
654 * - minimum and maximum engine clock labeled OD_SCLK
656 * minimum (not available for Vega20 and Navi1x) and maximum memory
657 * clock labeled OD_MCLK
659 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
660 * They can be used to calibrate the sclk voltage curve. This is
661 * available for Vega20 and NV1X.
663 * - voltage offset (in mV) applied to the target voltage calculation.
664 * This is available for Sienna Cichlid, Navy Flounder, Dimgrey
665 * Cavefish and some later SMU13 ASICs. For these ASICs, the target
666 * voltage calculation can be illustrated by "voltage = voltage
667 * calculated from v/f curve + overdrive vddgfx offset"
669 * - a list of valid ranges for sclk, mclk, voltage curve points
670 * or voltage offset labeled OD_RANGE
674 * Reading the file will display:
676 * - minimum and maximum engine clock labeled OD_SCLK
678 * - a list of valid ranges for sclk labeled OD_RANGE
682 * Reading the file will display:
684 * - minimum and maximum engine clock labeled OD_SCLK
685 * - minimum and maximum core clocks labeled OD_CCLK
687 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
689 * To manually adjust these settings:
691 * - First select manual using power_dpm_force_performance_level
693 * - For clock frequency setting, enter a new value by writing a
694 * string that contains "s/m index clock" to the file. The index
695 * should be 0 to set the minimum clock and 1 to set the maximum
696 * clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
697 * "m 1 800" will update the maximum mclk to 800 MHz. For core
698 * clocks on VanGogh, the string contains "p core index clock".
699 * E.g., "p 2 0 800" would set the minimum core clock on core 2 to 800 MHz.
702 * For sclk voltage curve supported by Vega20 and NV1X, enter the new
703 * values by writing a string that contains "vc point clock voltage"
704 * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
705 * 600" will update point1 with clock set as 300Mhz and voltage as 600mV.
706 * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and
709 * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
710 * Cavefish and some later SMU13 ASICs, enter the new value by writing a
711 * string that contains "vo offset". E.g., "vo -10" will update the extra
712 * voltage offset applied to the whole v/f curve line as -10 mV.
714 * - When you have edited all of the states as needed, write "c" (commit)
715 * to the file to commit your changes
717 * - If you want to reset to the default power levels, write "r" (reset)
718 * to the file to reset them (see the example sequence below)
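*
* The example sequence below (pre-Vega20 "s/m level clock voltage" syntax,
* illustrative values, path abbreviated) ties these steps together:
*
* .. code-block:: console
*
*	echo manual > /sys/bus/pci/devices/.../power_dpm_force_performance_level
*	echo "s 1 500 820" > /sys/bus/pci/devices/.../pp_od_clk_voltage
*	echo "m 0 350 810" > /sys/bus/pci/devices/.../pp_od_clk_voltage
*	echo "c" > /sys/bus/pci/devices/.../pp_od_clk_voltage
*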
722 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
723 struct device_attribute *attr,
727 struct drm_device *ddev = dev_get_drvdata(dev);
728 struct amdgpu_device *adev = drm_to_adev(ddev);
730 uint32_t parameter_size = 0;
735 const char delimiter[3] = {' ', '\n', '\0'};
738 if (amdgpu_in_reset(adev))
740 if (adev->in_suspend && !adev->in_runpm)
743 if (count > 127 || count == 0)
747 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
748 else if (*buf == 'p')
749 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
750 else if (*buf == 'm')
751 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
752 else if (*buf == 'r')
753 type = PP_OD_RESTORE_DEFAULT_TABLE;
754 else if (*buf == 'c')
755 type = PP_OD_COMMIT_DPM_TABLE;
756 else if (!strncmp(buf, "vc", 2))
757 type = PP_OD_EDIT_VDDC_CURVE;
758 else if (!strncmp(buf, "vo", 2))
759 type = PP_OD_EDIT_VDDGFX_OFFSET;
763 memcpy(buf_cpy, buf, count);
768 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
769 (type == PP_OD_EDIT_VDDGFX_OFFSET))
771 while (isspace(*++tmp_str));
773 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
774 if (strlen(sub_str) == 0)
776 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
784 while (isspace(*tmp_str))
788 ret = pm_runtime_resume_and_get(ddev->dev);
792 if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
798 if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
799 parameter, parameter_size))
802 if (type == PP_OD_COMMIT_DPM_TABLE) {
803 if (amdgpu_dpm_dispatch_task(adev,
804 AMD_PP_TASK_READJUST_POWER_STATE,
809 pm_runtime_mark_last_busy(ddev->dev);
810 pm_runtime_put_autosuspend(ddev->dev);
815 pm_runtime_mark_last_busy(ddev->dev);
816 pm_runtime_put_autosuspend(ddev->dev);
820 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
821 struct device_attribute *attr,
824 struct drm_device *ddev = dev_get_drvdata(dev);
825 struct amdgpu_device *adev = drm_to_adev(ddev);
828 enum pp_clock_type od_clocks[6] = {
838 if (amdgpu_in_reset(adev))
840 if (adev->in_suspend && !adev->in_runpm)
843 ret = pm_runtime_get_if_active(ddev->dev);
845 return ret ?: -EPERM;
847 for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
848 ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
852 if (ret == -ENOENT) {
853 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
854 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
855 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
856 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
857 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
858 size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
862 size = sysfs_emit(buf, "\n");
864 pm_runtime_put_autosuspend(ddev->dev);
872 * The amdgpu driver provides a sysfs API for adjusting which powerplay
873 * features are enabled. The file pp_features is used for this. It is
874 * only available for Vega10 and later dGPUs.
876 * Reading back the file will show you the following:
877 * - Current ppfeature masks
878 * - List of all the supported powerplay features with their names,
879 * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
881 * To manually enable or disable a specific feature, just set or clear
882 * the corresponding bit in the original ppfeature mask and write the
883 * new ppfeature mask back to the file.
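*
* As an illustrative example (the path is abbreviated and the mask value is
* made up; derive the real value from the list read back), a feature mask
* could be applied with:
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../pp_features
*	echo 0x0000000000003fff > /sys/bus/pci/devices/.../pp_features
*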
885 static ssize_t amdgpu_set_pp_features(struct device *dev,
886 struct device_attribute *attr,
890 struct drm_device *ddev = dev_get_drvdata(dev);
891 struct amdgpu_device *adev = drm_to_adev(ddev);
892 uint64_t featuremask;
895 if (amdgpu_in_reset(adev))
897 if (adev->in_suspend && !adev->in_runpm)
900 ret = kstrtou64(buf, 0, &featuremask);
904 ret = pm_runtime_resume_and_get(ddev->dev);
908 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
910 pm_runtime_mark_last_busy(ddev->dev);
911 pm_runtime_put_autosuspend(ddev->dev);
919 static ssize_t amdgpu_get_pp_features(struct device *dev,
920 struct device_attribute *attr,
923 struct drm_device *ddev = dev_get_drvdata(dev);
924 struct amdgpu_device *adev = drm_to_adev(ddev);
928 if (amdgpu_in_reset(adev))
930 if (adev->in_suspend && !adev->in_runpm)
933 ret = pm_runtime_get_if_active(ddev->dev);
935 return ret ?: -EPERM;
937 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
939 size = sysfs_emit(buf, "\n");
941 pm_runtime_put_autosuspend(ddev->dev);
947 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
949 * The amdgpu driver provides a sysfs API for adjusting what power levels
950 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
951 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for this.
954 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
955 * Vega10 and later ASICs.
956 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
958 * Reading back the files will show you the available power levels within
959 * the power state and the clock information for those levels. If deep sleep is
960 * applied to a clock, the level will be denoted by a special level 'S:'
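*
* For example, reading pp_dpm_sclk may produce output resembling the
* following (clock values are illustrative; '*' marks the active level):
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../pp_dpm_sclk
*	S: 19Mhz *
*	0: 615Mhz
*	1: 2200Mhz
*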
970 * To manually adjust these states, first select manual using
971 * power_dpm_force_performance_level.
972 * Secondly, enter a new value for each level by writing a string that
973 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
976 * .. code-block:: bash
978 * echo "4 5 6" > pp_dpm_sclk
980 * will enable sclk levels 4, 5, and 6.
982 * NOTE: changing the dcefclk max dpm level is currently not supported
985 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
986 enum pp_clock_type type,
989 struct drm_device *ddev = dev_get_drvdata(dev);
990 struct amdgpu_device *adev = drm_to_adev(ddev);
994 if (amdgpu_in_reset(adev))
996 if (adev->in_suspend && !adev->in_runpm)
999 ret = pm_runtime_get_if_active(ddev->dev);
1001 return ret ?: -EPERM;
1003 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1005 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1008 size = sysfs_emit(buf, "\n");
1010 pm_runtime_put_autosuspend(ddev->dev);
1016 * Worst case: 32 bits individually specified, in octal at 12 characters
1017 * per line (+1 for \n).
1019 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1021 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1024 unsigned long level;
1025 char *sub_str = NULL;
1027 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1028 const char delimiter[3] = {' ', '\n', '\0'};
1033 bytes = min(count, sizeof(buf_cpy) - 1);
1034 memcpy(buf_cpy, buf, bytes);
1035 buf_cpy[bytes] = '\0';
1037 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1038 if (strlen(sub_str)) {
1039 ret = kstrtoul(sub_str, 0, &level);
1040 if (ret || level > 31)
1042 *mask |= 1 << level;
1050 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1051 enum pp_clock_type type,
1055 struct drm_device *ddev = dev_get_drvdata(dev);
1056 struct amdgpu_device *adev = drm_to_adev(ddev);
1060 if (amdgpu_in_reset(adev))
1062 if (adev->in_suspend && !adev->in_runpm)
1065 ret = amdgpu_read_mask(buf, count, &mask);
1069 ret = pm_runtime_resume_and_get(ddev->dev);
1073 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1075 pm_runtime_mark_last_busy(ddev->dev);
1076 pm_runtime_put_autosuspend(ddev->dev);
1084 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1085 struct device_attribute *attr,
1088 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1091 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1092 struct device_attribute *attr,
1096 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1099 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1100 struct device_attribute *attr,
1103 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1106 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1107 struct device_attribute *attr,
1111 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1114 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1115 struct device_attribute *attr,
1118 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1121 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1122 struct device_attribute *attr,
1126 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1129 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1130 struct device_attribute *attr,
1133 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1136 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1137 struct device_attribute *attr,
1141 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1144 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1145 struct device_attribute *attr,
1148 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1151 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1152 struct device_attribute *attr,
1156 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1159 static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1160 struct device_attribute *attr,
1163 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1166 static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1167 struct device_attribute *attr,
1171 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1174 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1175 struct device_attribute *attr,
1178 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1181 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1182 struct device_attribute *attr,
1186 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1189 static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1190 struct device_attribute *attr,
1193 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1196 static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1197 struct device_attribute *attr,
1201 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1204 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1205 struct device_attribute *attr,
1208 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1211 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1212 struct device_attribute *attr,
1216 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1219 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1220 struct device_attribute *attr,
1223 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1226 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1227 struct device_attribute *attr,
1231 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1234 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1235 struct device_attribute *attr,
1238 struct drm_device *ddev = dev_get_drvdata(dev);
1239 struct amdgpu_device *adev = drm_to_adev(ddev);
1243 if (amdgpu_in_reset(adev))
1245 if (adev->in_suspend && !adev->in_runpm)
1248 ret = pm_runtime_get_if_active(ddev->dev);
1250 return ret ?: -EPERM;
1252 value = amdgpu_dpm_get_sclk_od(adev);
1254 pm_runtime_put_autosuspend(ddev->dev);
1256 return sysfs_emit(buf, "%d\n", value);
1259 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1260 struct device_attribute *attr,
1264 struct drm_device *ddev = dev_get_drvdata(dev);
1265 struct amdgpu_device *adev = drm_to_adev(ddev);
1269 if (amdgpu_in_reset(adev))
1271 if (adev->in_suspend && !adev->in_runpm)
1274 ret = kstrtol(buf, 0, &value);
1279 ret = pm_runtime_resume_and_get(ddev->dev);
1283 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1285 pm_runtime_mark_last_busy(ddev->dev);
1286 pm_runtime_put_autosuspend(ddev->dev);
1291 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1292 struct device_attribute *attr,
1295 struct drm_device *ddev = dev_get_drvdata(dev);
1296 struct amdgpu_device *adev = drm_to_adev(ddev);
1300 if (amdgpu_in_reset(adev))
1302 if (adev->in_suspend && !adev->in_runpm)
1305 ret = pm_runtime_get_if_active(ddev->dev);
1307 return ret ?: -EPERM;
1309 value = amdgpu_dpm_get_mclk_od(adev);
1311 pm_runtime_put_autosuspend(ddev->dev);
1313 return sysfs_emit(buf, "%d\n", value);
1316 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1317 struct device_attribute *attr,
1321 struct drm_device *ddev = dev_get_drvdata(dev);
1322 struct amdgpu_device *adev = drm_to_adev(ddev);
1326 if (amdgpu_in_reset(adev))
1328 if (adev->in_suspend && !adev->in_runpm)
1331 ret = kstrtol(buf, 0, &value);
1336 ret = pm_runtime_resume_and_get(ddev->dev);
1340 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1342 pm_runtime_mark_last_busy(ddev->dev);
1343 pm_runtime_put_autosuspend(ddev->dev);
1349 * DOC: pp_power_profile_mode
1351 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1352 * related to switching between power levels in a power state. The file
1353 * pp_power_profile_mode is used for this.
1355 * Reading this file outputs a list of all of the predefined power profiles
1356 * and the relevant heuristics settings for that profile.
1358 * To select a profile or create a custom profile, first select manual using
1359 * power_dpm_force_performance_level. Writing the number of a predefined
1360 * profile to pp_power_profile_mode will enable those heuristics. To
1361 * create a custom set of heuristics, write a string of numbers to the file
1362 * starting with the number of the custom profile along with a setting
1363 * for each heuristic parameter. Due to differences across asic families
1364 * the heuristic parameters vary from family to family.
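*
* For instance (path abbreviated; profile numbers come from the list read
* back and differ between ASICs), a predefined profile might be selected with:
*
* .. code-block:: console
*
*	echo manual > /sys/bus/pci/devices/.../power_dpm_force_performance_level
*	cat /sys/bus/pci/devices/.../pp_power_profile_mode
*	echo 1 > /sys/bus/pci/devices/.../pp_power_profile_mode
*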
1368 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1369 struct device_attribute *attr,
1372 struct drm_device *ddev = dev_get_drvdata(dev);
1373 struct amdgpu_device *adev = drm_to_adev(ddev);
1377 if (amdgpu_in_reset(adev))
1379 if (adev->in_suspend && !adev->in_runpm)
1382 ret = pm_runtime_get_if_active(ddev->dev);
1384 return ret ?: -EPERM;
1386 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1388 size = sysfs_emit(buf, "\n");
1390 pm_runtime_put_autosuspend(ddev->dev);
1396 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1397 struct device_attribute *attr,
1402 struct drm_device *ddev = dev_get_drvdata(dev);
1403 struct amdgpu_device *adev = drm_to_adev(ddev);
1404 uint32_t parameter_size = 0;
1406 char *sub_str, buf_cpy[128];
1410 long int profile_mode = 0;
1411 const char delimiter[3] = {' ', '\n', '\0'};
1413 if (amdgpu_in_reset(adev))
1415 if (adev->in_suspend && !adev->in_runpm)
1420 ret = kstrtol(tmp, 0, &profile_mode);
1424 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1425 if (count < 2 || count > 127)
1427 while (isspace(*++buf))
1429 memcpy(buf_cpy, buf, count-i);
1431 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1432 if (strlen(sub_str) == 0)
1434 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1438 while (isspace(*tmp_str))
1442 parameter[parameter_size] = profile_mode;
1444 ret = pm_runtime_resume_and_get(ddev->dev);
1448 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1450 pm_runtime_mark_last_busy(ddev->dev);
1451 pm_runtime_put_autosuspend(ddev->dev);
1459 static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
1460 enum amd_pp_sensors sensor,
1463 int r, size = sizeof(uint32_t);
1465 if (amdgpu_in_reset(adev))
1467 if (adev->in_suspend && !adev->in_runpm)
1470 r = pm_runtime_get_if_active(adev->dev);
1474 /* get the sensor value */
1475 r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1477 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1483 * DOC: gpu_busy_percent
1485 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1486 * is as a percentage. The file gpu_busy_percent is used for this.
1487 * The SMU firmware computes a percentage of load based on the
1488 * aggregate activity level in the IP cores.
1490 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1491 struct device_attribute *attr,
1494 struct drm_device *ddev = dev_get_drvdata(dev);
1495 struct amdgpu_device *adev = drm_to_adev(ddev);
1499 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1503 return sysfs_emit(buf, "%d\n", value);
1507 * DOC: mem_busy_percent
1509 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1510 * is as a percentage. The file mem_busy_percent is used for this.
1511 * The SMU firmware computes a percentage of load based on the
1512 * aggregate activity level in the IP cores.
1514 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1515 struct device_attribute *attr,
1518 struct drm_device *ddev = dev_get_drvdata(dev);
1519 struct amdgpu_device *adev = drm_to_adev(ddev);
1523 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1527 return sysfs_emit(buf, "%d\n", value);
1531 * DOC: vcn_busy_percent
1533 * The amdgpu driver provides a sysfs API for reading how busy the VCN
1534 * is as a percentage. The file vcn_busy_percent is used for this.
1535 * The SMU firmware computes a percentage of load based on the
1536 * aggregate activity level in the IP cores.
1538 static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
1539 struct device_attribute *attr,
1542 struct drm_device *ddev = dev_get_drvdata(dev);
1543 struct amdgpu_device *adev = drm_to_adev(ddev);
1547 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
1551 return sysfs_emit(buf, "%d\n", value);
1557 * The amdgpu driver provides a sysfs API for estimating how much data
1558 * has been received and sent by the GPU in the last second through PCIe.
1559 * The file pcie_bw is used for this.
1560 * The Perf counters count the number of received and sent messages and return
1561 * those values, as well as the maximum payload size of a PCIe packet (mps).
1562 * Note that it is not possible to easily and quickly obtain the size of each
1563 * packet transmitted, so we output the max payload size (mps) to allow for
1564 * quick estimation of the PCIe bandwidth usage.
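*
* As a sketch (the counter values below are illustrative), a rough estimate
* of the bytes moved in the last second can be derived from the three fields:
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../pcie_bw
*	4225 3029 256
*
* i.e. roughly (4225 + 3029) * 256 bytes, assuming every message carried a
* full mps-sized payload.
*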
1566 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1567 struct device_attribute *attr,
1570 struct drm_device *ddev = dev_get_drvdata(dev);
1571 struct amdgpu_device *adev = drm_to_adev(ddev);
1572 uint64_t count0 = 0, count1 = 0;
1575 if (amdgpu_in_reset(adev))
1577 if (adev->in_suspend && !adev->in_runpm)
1580 if (adev->flags & AMD_IS_APU)
1583 if (!adev->asic_funcs->get_pcie_usage)
1586 ret = pm_runtime_get_if_active(ddev->dev);
1588 return ret ?: -EPERM;
1590 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1592 pm_runtime_put_autosuspend(ddev->dev);
1594 return sysfs_emit(buf, "%llu %llu %i\n",
1595 count0, count1, pcie_get_mps(adev->pdev));
1601 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1602 * The file unique_id is used for this.
1603 * This will provide a unique ID that will persist from machine to machine.
1605 * NOTE: This will only work for GFX9 and newer. This file will be absent
1606 * on unsupported ASICs (GFX8 and older).
1608 static ssize_t amdgpu_get_unique_id(struct device *dev,
1609 struct device_attribute *attr,
1612 struct drm_device *ddev = dev_get_drvdata(dev);
1613 struct amdgpu_device *adev = drm_to_adev(ddev);
1615 if (amdgpu_in_reset(adev))
1617 if (adev->in_suspend && !adev->in_runpm)
1620 if (adev->unique_id)
1621 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1627 * DOC: thermal_throttling_logging
1629 * Thermal throttling pulls down the clock frequency and thus the performance.
1630 * It's a useful mechanism to protect the chip from overheating. Since it
1631 * impacts performance, the user controls whether it is enabled and if so,
1632 * the log frequency.
1634 * Reading back the file shows you the status (enabled or disabled) and
1635 * the interval (in seconds) between thermal logging events.
1637 * Writing an integer to the file sets a new logging interval, in seconds.
1638 * The value should be between 1 and 3600. If the value is less than 1,
1639 * thermal logging is disabled. Values greater than 3600 are ignored.
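*
* For example (path abbreviated), logging can be limited to once every 60
* seconds, or disabled entirely:
*
* .. code-block:: console
*
*	echo 60 > /sys/bus/pci/devices/.../thermal_throttling_logging
*	echo 0 > /sys/bus/pci/devices/.../thermal_throttling_logging
*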
1641 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1642 struct device_attribute *attr,
1645 struct drm_device *ddev = dev_get_drvdata(dev);
1646 struct amdgpu_device *adev = drm_to_adev(ddev);
1648 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1649 adev_to_drm(adev)->unique,
1650 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1651 adev->throttling_logging_rs.interval / HZ + 1);
1654 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1655 struct device_attribute *attr,
1659 struct drm_device *ddev = dev_get_drvdata(dev);
1660 struct amdgpu_device *adev = drm_to_adev(ddev);
1661 long throttling_logging_interval;
1662 unsigned long flags;
1665 ret = kstrtol(buf, 0, &throttling_logging_interval);
1669 if (throttling_logging_interval > 3600)
1672 if (throttling_logging_interval > 0) {
1673 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1675 * Reset the ratelimit timer internals.
1676 * This can effectively restart the timer.
1678 adev->throttling_logging_rs.interval =
1679 (throttling_logging_interval - 1) * HZ;
1680 adev->throttling_logging_rs.begin = 0;
1681 adev->throttling_logging_rs.printed = 0;
1682 adev->throttling_logging_rs.missed = 0;
1683 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1685 atomic_set(&adev->throttling_logging_enabled, 1);
1687 atomic_set(&adev->throttling_logging_enabled, 0);
1694 * DOC: apu_thermal_cap
1696 * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1697 * limit temperature in millidegrees Celsius.
1699 * Reading back the file shows you the core limit value.
1701 * Writing an integer to the file sets a new thermal limit. The value
1702 * should be between 0 and 100. If the value is less than 0 or greater
1703 * than 100, then the write request will be ignored.
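*
* For example (path abbreviated; the value is illustrative), the current
* limit can be read back and a new one set with:
*
* .. code-block:: console
*
*	cat /sys/bus/pci/devices/.../apu_thermal_cap
*	echo 95 > /sys/bus/pci/devices/.../apu_thermal_cap
*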
1705 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1706 struct device_attribute *attr,
1711 struct drm_device *ddev = dev_get_drvdata(dev);
1712 struct amdgpu_device *adev = drm_to_adev(ddev);
1714 ret = pm_runtime_get_if_active(ddev->dev);
1716 return ret ?: -EPERM;
1718 ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1720 size = sysfs_emit(buf, "%u\n", limit);
1722 size = sysfs_emit(buf, "failed to get thermal limit\n");
1724 pm_runtime_put_autosuspend(ddev->dev);
1729 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1730 struct device_attribute *attr,
1736 struct drm_device *ddev = dev_get_drvdata(dev);
1737 struct amdgpu_device *adev = drm_to_adev(ddev);
1739 ret = kstrtou32(buf, 10, &value);
1744 dev_err(dev, "Invalid argument !\n");
1748 ret = pm_runtime_resume_and_get(ddev->dev);
1752 ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1754 pm_runtime_mark_last_busy(ddev->dev);
1755 pm_runtime_put_autosuspend(ddev->dev);
1756 dev_err(dev, "failed to update thermal limit\n");
1760 pm_runtime_mark_last_busy(ddev->dev);
1761 pm_runtime_put_autosuspend(ddev->dev);
1766 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
1767 struct amdgpu_device_attr *attr,
1769 enum amdgpu_device_attr_states *states)
1771 if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
1772 *states = ATTR_STATE_UNSUPPORTED;
1777 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1778 struct device_attribute *attr, char *buf)
1780 struct drm_device *ddev = dev_get_drvdata(dev);
1781 struct amdgpu_device *adev = drm_to_adev(ddev);
1785 if (amdgpu_in_reset(adev))
1787 if (adev->in_suspend && !adev->in_runpm)
1790 ret = pm_runtime_get_if_active(ddev->dev);
1792 return ret ?: -EPERM;
1794 size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1796 pm_runtime_put_autosuspend(ddev->dev);
1804 * The amdgpu driver provides a sysfs API for retrieving current gpu
1805 * metrics data. The file gpu_metrics is used for this. Reading the
1806 * file will dump all the current gpu metrics data.
1808 * These data include temperature, frequency, engine utilization,
1809 * power consumption, throttler status, fan speed and cpu core statistics
1810 * (available for APU only). That is, it gives a snapshot of all sensors at the same time.
1813 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1814 struct device_attribute *attr,
1817 struct drm_device *ddev = dev_get_drvdata(dev);
1818 struct amdgpu_device *adev = drm_to_adev(ddev);
1823 if (amdgpu_in_reset(adev))
1825 if (adev->in_suspend && !adev->in_runpm)
1828 ret = pm_runtime_get_if_active(ddev->dev);
1830 return ret ?: -EPERM;
1832 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1836 if (size >= PAGE_SIZE)
1837 size = PAGE_SIZE - 1;
1839 memcpy(buf, gpu_metrics, size);
1842 pm_runtime_put_autosuspend(ddev->dev);
1847 static int amdgpu_show_powershift_percent(struct device *dev,
1848 char *buf, enum amd_pp_sensors sensor)
1850 struct drm_device *ddev = dev_get_drvdata(dev);
1851 struct amdgpu_device *adev = drm_to_adev(ddev);
1855 r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1856 if (r == -EOPNOTSUPP) {
1857 /* sensor not available on dGPU, try to read from APU */
1859 mutex_lock(&mgpu_info.mutex);
1860 for (i = 0; i < mgpu_info.num_gpu; i++) {
1861 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1862 adev = mgpu_info.gpu_ins[i].adev;
1866 mutex_unlock(&mgpu_info.mutex);
1868 r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1874 return sysfs_emit(buf, "%u%%\n", ss_power);
1878 * DOC: smartshift_apu_power
1880 * The amdgpu driver provides a sysfs API for reporting APU power
1881 * shift in percentage if the platform supports smartshift. Value 0 means that
1882 * there is no powershift and values between [1-100] mean that the power
1883 * is shifted to the APU; the percentage of boost is with respect to the APU power
1884 * limit on the platform.
1887 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1890 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1894 * DOC: smartshift_dgpu_power
1896 * The amdgpu driver provides a sysfs API for reporting dGPU power
1897 * shift in percentage if the platform supports smartshift. Value 0 means that
1898 * there is no powershift and values between [1-100] mean that the power is
1899 * shifted to the dGPU; the percentage of boost is with respect to the dGPU power
1900 * limit on the platform.
1903 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1906 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1910 * DOC: smartshift_bias
1912 * The amdgpu driver provides a sysfs API for reporting the
1913 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
1914 * and the default is 0. -100 sets maximum preference to the APU
1915 * and 100 sets maximum preference to the dGPU.
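*
* For example (path abbreviated), biasing power allocation towards the APU
* could look like:
*
* .. code-block:: console
*
*	echo -50 > /sys/bus/pci/devices/.../smartshift_bias
*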
1918 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1919 struct device_attribute *attr,
1924 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1929 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1930 struct device_attribute *attr,
1931 const char *buf, size_t count)
1933 struct drm_device *ddev = dev_get_drvdata(dev);
1934 struct amdgpu_device *adev = drm_to_adev(ddev);
1938 if (amdgpu_in_reset(adev))
1940 if (adev->in_suspend && !adev->in_runpm)
1943 r = pm_runtime_resume_and_get(ddev->dev);
1947 r = kstrtoint(buf, 10, &bias);
1951 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1952 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1953 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1954 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1956 amdgpu_smartshift_bias = bias;
1959 /* TODO: update bias level with SMU message */
1962 pm_runtime_mark_last_busy(ddev->dev);
1963 pm_runtime_put_autosuspend(ddev->dev);
1967 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1968 uint32_t mask, enum amdgpu_device_attr_states *states)
1970 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1971 *states = ATTR_STATE_UNSUPPORTED;
1976 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1977 uint32_t mask, enum amdgpu_device_attr_states *states)
1981 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1982 *states = ATTR_STATE_UNSUPPORTED;
1983 else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1985 *states = ATTR_STATE_UNSUPPORTED;
1986 else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1988 *states = ATTR_STATE_UNSUPPORTED;
1993 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1994 uint32_t mask, enum amdgpu_device_attr_states *states)
1996 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1998 *states = ATTR_STATE_SUPPORTED;
2000 if (!amdgpu_dpm_is_overdrive_supported(adev)) {
2001 *states = ATTR_STATE_UNSUPPORTED;
2005 /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
2006 if (gc_ver == IP_VERSION(9, 4, 3) ||
2007 gc_ver == IP_VERSION(9, 4, 4)) {
2008 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2009 *states = ATTR_STATE_UNSUPPORTED;
2013 if (!(attr->flags & mask))
2014 *states = ATTR_STATE_UNSUPPORTED;
2019 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2020 uint32_t mask, enum amdgpu_device_attr_states *states)
2022 struct device_attribute *dev_attr = &attr->dev_attr;
2025 *states = ATTR_STATE_SUPPORTED;
2027 if (!(attr->flags & mask)) {
2028 *states = ATTR_STATE_UNSUPPORTED;
2032 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2033 /* dcefclk node is not available on gfx 11.0.3 sriov */
2034 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
2035 gc_ver < IP_VERSION(9, 0, 0) ||
2036 !amdgpu_device_has_display_hardware(adev))
2037 *states = ATTR_STATE_UNSUPPORTED;
2039 /* SMU MP1 does not support dcefclk level setting,
2040 * setting should not be allowed from VF if not in one VF mode.
2042 if (gc_ver >= IP_VERSION(10, 0, 0) ||
2043 (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
2044 dev_attr->attr.mode &= ~S_IWUGO;
2045 dev_attr->store = NULL;
2051 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2052 uint32_t mask, enum amdgpu_device_attr_states *states)
2054 struct device_attribute *dev_attr = &attr->dev_attr;
2055 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2056 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
2057 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2059 *states = ATTR_STATE_SUPPORTED;
2061 if (!(attr->flags & mask)) {
2062 *states = ATTR_STATE_UNSUPPORTED;
2066 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2067 if (gc_ver < IP_VERSION(9, 0, 0))
2068 *states = ATTR_STATE_UNSUPPORTED;
2069 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2070 if (mp1_ver < IP_VERSION(10, 0, 0))
2071 *states = ATTR_STATE_UNSUPPORTED;
2072 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2073 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2074 gc_ver == IP_VERSION(10, 3, 3) ||
2075 gc_ver == IP_VERSION(10, 3, 6) ||
2076 gc_ver == IP_VERSION(10, 3, 7) ||
2077 gc_ver == IP_VERSION(10, 3, 0) ||
2078 gc_ver == IP_VERSION(10, 1, 2) ||
2079 gc_ver == IP_VERSION(11, 0, 0) ||
2080 gc_ver == IP_VERSION(11, 0, 1) ||
2081 gc_ver == IP_VERSION(11, 0, 4) ||
2082 gc_ver == IP_VERSION(11, 5, 0) ||
2083 gc_ver == IP_VERSION(11, 0, 2) ||
2084 gc_ver == IP_VERSION(11, 0, 3) ||
2085 gc_ver == IP_VERSION(9, 4, 3) ||
2086 gc_ver == IP_VERSION(9, 4, 4)))
2087 *states = ATTR_STATE_UNSUPPORTED;
2088 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2089 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2090 gc_ver == IP_VERSION(10, 3, 0) ||
2091 gc_ver == IP_VERSION(11, 0, 2) ||
2092 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2093 *states = ATTR_STATE_UNSUPPORTED;
2094 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2095 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2096 gc_ver == IP_VERSION(10, 3, 3) ||
2097 gc_ver == IP_VERSION(10, 3, 6) ||
2098 gc_ver == IP_VERSION(10, 3, 7) ||
2099 gc_ver == IP_VERSION(10, 3, 0) ||
2100 gc_ver == IP_VERSION(10, 1, 2) ||
2101 gc_ver == IP_VERSION(11, 0, 0) ||
2102 gc_ver == IP_VERSION(11, 0, 1) ||
2103 gc_ver == IP_VERSION(11, 0, 4) ||
2104 gc_ver == IP_VERSION(11, 5, 0) ||
2105 gc_ver == IP_VERSION(11, 0, 2) ||
2106 gc_ver == IP_VERSION(11, 0, 3) ||
2107 gc_ver == IP_VERSION(9, 4, 3) ||
2108 gc_ver == IP_VERSION(9, 4, 4)))
2109 *states = ATTR_STATE_UNSUPPORTED;
2110 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2111 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2112 gc_ver == IP_VERSION(10, 3, 0) ||
2113 gc_ver == IP_VERSION(11, 0, 2) ||
2114 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2115 *states = ATTR_STATE_UNSUPPORTED;
2116 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2117 if (gc_ver == IP_VERSION(9, 4, 2) ||
2118 gc_ver == IP_VERSION(9, 4, 3) ||
2119 gc_ver == IP_VERSION(9, 4, 4))
2120 *states = ATTR_STATE_UNSUPPORTED;
2124 case IP_VERSION(9, 4, 1):
2125 case IP_VERSION(9, 4, 2):
2126 /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
2127 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2128 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2129 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2130 dev_attr->attr.mode &= ~S_IWUGO;
2131 dev_attr->store = NULL;
2138 /* setting should not be allowed from VF if not in one VF mode */
2139 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
2140 dev_attr->attr.mode &= ~S_IWUGO;
2141 dev_attr->store = NULL;
2147 /* pm policy attributes */
2148 struct amdgpu_pm_policy_attr {
2149 struct device_attribute dev_attr;
2150 enum pp_pm_policy id;
2156 * Certain SOCs can support different power policies to optimize application
2157 * performance. However, this policy is provided only at SOC level and not at a
2158 * per-process level. This is especially useful when the entire SOC is utilized for
2159 * a dedicated workload.
2161 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2162 * only two types of policies are supported through this interface.
2164 * Pstate Policy Selection - This is to select different Pstate profiles which
2165 * decides clock/throttling preferences.
2167 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2168 * this helps to select the policy to be applied for per-link power down.
2170 * The list of available policies and policy levels varies between SOCs. They can
2171 * be viewed under the pm_policy node directory. If the SOC doesn't support any policy,
2172 * this node won't be available. The different policies supported will be
2173 * available as separate nodes under pm_policy.
2175 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2177 * Reading the policy file shows the different levels supported. The level which
2178 * is applied presently is denoted by * (asterisk). E.g.,
2180 * .. code-block:: console
2182 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2183 * 0 : soc_pstate_default
2188 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2191 * 2 : plpd_optimized*
2193 * To apply a specific policy
2195 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2197 * For the levels listed in the example above, to select "plpd_optimized" for
2198 * XGMI and "soc_pstate_2" for soc pstate policy -
2200 * .. code-block:: console
2202 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2203 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2206 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2207 struct device_attribute *attr,
2210 struct drm_device *ddev = dev_get_drvdata(dev);
2211 struct amdgpu_device *adev = drm_to_adev(ddev);
2212 struct amdgpu_pm_policy_attr *policy_attr;
2215 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2217 if (amdgpu_in_reset(adev))
2219 if (adev->in_suspend && !adev->in_runpm)
2222 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2225 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2226 struct device_attribute *attr,
2227 const char *buf, size_t count)
2229 struct drm_device *ddev = dev_get_drvdata(dev);
2230 struct amdgpu_device *adev = drm_to_adev(ddev);
2231 struct amdgpu_pm_policy_attr *policy_attr;
2232 int ret, num_params = 0;
2233 char delimiter[] = " \n\t";
2238 if (amdgpu_in_reset(adev))
2240 if (adev->in_suspend && !adev->in_runpm)
2243 count = min(count, sizeof(tmp_buf));
2244 memcpy(tmp_buf, buf, count);
2245 tmp_buf[count - 1] = '\0';
2248 tmp = skip_spaces(tmp);
2249 while ((param = strsep(&tmp, delimiter))) {
2250 if (!strlen(param)) {
2251 tmp = skip_spaces(tmp);
2254 ret = kstrtol(param, 0, &val);
2262 if (num_params != 1)
2266 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2268 ret = pm_runtime_resume_and_get(ddev->dev);
2272 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2274 pm_runtime_mark_last_busy(ddev->dev);
2275 pm_runtime_put_autosuspend(ddev->dev);
2283 #define AMDGPU_PM_POLICY_ATTR(_name, _id) \
2284 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
2285 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2286 amdgpu_set_pm_policy_attr), \
2287 .id = PP_PM_POLICY_##_id, \
2290 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2292 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2293 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2295 static struct attribute *pm_policy_attrs[] = {
2296 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2297 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2301 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2302 struct attribute *attr, int n)
2304 struct device *dev = kobj_to_dev(kobj);
2305 struct drm_device *ddev = dev_get_drvdata(dev);
2306 struct amdgpu_device *adev = drm_to_adev(ddev);
2307 struct amdgpu_pm_policy_attr *policy_attr;
2310 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2312 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2319 const struct attribute_group amdgpu_pm_policy_attr_group = {
2320 .name = "pm_policy",
2321 .attrs = pm_policy_attrs,
2322 .is_visible = amdgpu_pm_policy_attr_visible,
2325 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2326 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2327 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2328 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2329 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2330 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2331 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2332 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2333 .attr_update = pp_dpm_clk_default_attr_update),
2334 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2335 .attr_update = pp_dpm_clk_default_attr_update),
2336 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2337 .attr_update = pp_dpm_clk_default_attr_update),
2338 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2339 .attr_update = pp_dpm_clk_default_attr_update),
2340 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2341 .attr_update = pp_dpm_clk_default_attr_update),
2342 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2343 .attr_update = pp_dpm_clk_default_attr_update),
2344 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2345 .attr_update = pp_dpm_clk_default_attr_update),
2346 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2347 .attr_update = pp_dpm_clk_default_attr_update),
2348 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2349 .attr_update = pp_dpm_dcefclk_attr_update),
2350 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2351 .attr_update = pp_dpm_clk_default_attr_update),
2352 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2353 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2354 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2355 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
2356 .attr_update = pp_od_clk_voltage_attr_update),
2357 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2358 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2359 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2360 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2361 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2362 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2363 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2364 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2365 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2366 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2367 .attr_update = ss_power_attr_update),
2368 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2369 .attr_update = ss_power_attr_update),
2370 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2371 .attr_update = ss_bias_attr_update),
2372 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
2373 .attr_update = amdgpu_pm_metrics_attr_update),
2376 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2377 uint32_t mask, enum amdgpu_device_attr_states *states)
2379 struct device_attribute *dev_attr = &attr->dev_attr;
2380 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2381 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2383 if (!(attr->flags & mask)) {
2384 *states = ATTR_STATE_UNSUPPORTED;
2388 if (DEVICE_ATTR_IS(mem_busy_percent)) {
2389 if ((adev->flags & AMD_IS_APU &&
2390 gc_ver != IP_VERSION(9, 4, 3)) ||
2391 gc_ver == IP_VERSION(9, 0, 1))
2392 *states = ATTR_STATE_UNSUPPORTED;
2393 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2394 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2395 gc_ver == IP_VERSION(10, 3, 3) ||
2396 gc_ver == IP_VERSION(10, 3, 6) ||
2397 gc_ver == IP_VERSION(10, 3, 7) ||
2398 gc_ver == IP_VERSION(11, 0, 1) ||
2399 gc_ver == IP_VERSION(11, 0, 4) ||
2400 gc_ver == IP_VERSION(11, 5, 0)))
2401 *states = ATTR_STATE_UNSUPPORTED;
2402 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2403 /* PCIe Perf counters won't work on APU nodes */
2404 if (adev->flags & AMD_IS_APU ||
2405 !adev->asic_funcs->get_pcie_usage)
2406 *states = ATTR_STATE_UNSUPPORTED;
2407 } else if (DEVICE_ATTR_IS(unique_id)) {
2409 case IP_VERSION(9, 0, 1):
2410 case IP_VERSION(9, 4, 0):
2411 case IP_VERSION(9, 4, 1):
2412 case IP_VERSION(9, 4, 2):
2413 case IP_VERSION(9, 4, 3):
2414 case IP_VERSION(9, 4, 4):
2415 case IP_VERSION(10, 3, 0):
2416 case IP_VERSION(11, 0, 0):
2417 case IP_VERSION(11, 0, 1):
2418 case IP_VERSION(11, 0, 2):
2419 case IP_VERSION(11, 0, 3):
2420 *states = ATTR_STATE_SUPPORTED;
2423 *states = ATTR_STATE_UNSUPPORTED;
2425 } else if (DEVICE_ATTR_IS(pp_features)) {
2426 if ((adev->flags & AMD_IS_APU &&
2427 gc_ver != IP_VERSION(9, 4, 3)) ||
2428 gc_ver < IP_VERSION(9, 0, 0))
2429 *states = ATTR_STATE_UNSUPPORTED;
2430 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2431 if (gc_ver < IP_VERSION(9, 1, 0))
2432 *states = ATTR_STATE_UNSUPPORTED;
2433 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2434 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2435 *states = ATTR_STATE_UNSUPPORTED;
2436 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2437 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2438 *states = ATTR_STATE_UNSUPPORTED;
2439 } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2440 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2441 *states = ATTR_STATE_UNSUPPORTED;
2442 } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2443 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2444 *states = ATTR_STATE_UNSUPPORTED;
2445 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2448 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2450 *states = ATTR_STATE_UNSUPPORTED;
2454 case IP_VERSION(10, 3, 0):
2455 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2456 amdgpu_sriov_vf(adev)) {
2457 dev_attr->attr.mode &= ~0222;
2458 dev_attr->store = NULL;
2469 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2470 struct amdgpu_device_attr *attr,
2471 uint32_t mask, struct list_head *attr_list)
2474 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2475 struct amdgpu_device_attr_entry *attr_entry;
2476 struct device_attribute *dev_attr;
2479 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2480 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2485 dev_attr = &attr->dev_attr;
2486 name = dev_attr->attr.name;
2488 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2490 ret = attr_update(adev, attr, mask, &attr_states);
2492 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2497 if (attr_states == ATTR_STATE_UNSUPPORTED)
2500 ret = device_create_file(adev->dev, dev_attr);
2502 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2506 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2510 attr_entry->attr = attr;
2511 INIT_LIST_HEAD(&attr_entry->entry);
2513 list_add_tail(&attr_entry->entry, attr_list);
2518 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2520 struct device_attribute *dev_attr = &attr->dev_attr;
2522 device_remove_file(adev->dev, dev_attr);
2525 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2526 struct list_head *attr_list);
2528 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2529 struct amdgpu_device_attr *attrs,
2532 struct list_head *attr_list)
2537 for (i = 0; i < counts; i++) {
2538 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2546 amdgpu_device_attr_remove_groups(adev, attr_list);
2551 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2552 struct list_head *attr_list)
2554 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2556 if (list_empty(attr_list))
2559 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2560 amdgpu_device_attr_remove(adev, entry->attr);
2561 list_del(&entry->entry);
2566 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2567 struct device_attribute *attr,
2570 struct amdgpu_device *adev = dev_get_drvdata(dev);
2571 int channel = to_sensor_dev_attr(attr)->index;
2574 if (channel >= PP_TEMP_MAX)
2578 case PP_TEMP_JUNCTION:
2579 /* get current junction temperature */
2580 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2584 /* get current edge temperature */
2585 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2589 /* get current memory temperature */
2590 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2601 return sysfs_emit(buf, "%d\n", temp);
2604 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2605 struct device_attribute *attr,
2608 struct amdgpu_device *adev = dev_get_drvdata(dev);
2609 int hyst = to_sensor_dev_attr(attr)->index;
2613 temp = adev->pm.dpm.thermal.min_temp;
2615 temp = adev->pm.dpm.thermal.max_temp;
2617 return sysfs_emit(buf, "%d\n", temp);
2620 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2621 struct device_attribute *attr,
2624 struct amdgpu_device *adev = dev_get_drvdata(dev);
2625 int hyst = to_sensor_dev_attr(attr)->index;
2629 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2631 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2633 return sysfs_emit(buf, "%d\n", temp);
2636 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2637 struct device_attribute *attr,
2640 struct amdgpu_device *adev = dev_get_drvdata(dev);
2641 int hyst = to_sensor_dev_attr(attr)->index;
2645 temp = adev->pm.dpm.thermal.min_mem_temp;
2647 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2649 return sysfs_emit(buf, "%d\n", temp);
2652 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2653 struct device_attribute *attr,
2656 int channel = to_sensor_dev_attr(attr)->index;
2658 if (channel >= PP_TEMP_MAX)
2661 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2664 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2665 struct device_attribute *attr,
2668 struct amdgpu_device *adev = dev_get_drvdata(dev);
2669 int channel = to_sensor_dev_attr(attr)->index;
2672 if (channel >= PP_TEMP_MAX)
2676 case PP_TEMP_JUNCTION:
2677 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2680 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2683 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2687 return sysfs_emit(buf, "%d\n", temp);
2690 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2691 struct device_attribute *attr,
2694 struct amdgpu_device *adev = dev_get_drvdata(dev);
2698 if (amdgpu_in_reset(adev))
2700 if (adev->in_suspend && !adev->in_runpm)
2703 ret = pm_runtime_get_if_active(adev->dev);
2705 return ret ?: -EPERM;
2707 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2709 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2714 return sysfs_emit(buf, "%u\n", pwm_mode);
2717 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2718 struct device_attribute *attr,
2722 struct amdgpu_device *adev = dev_get_drvdata(dev);
2727 if (amdgpu_in_reset(adev))
2729 if (adev->in_suspend && !adev->in_runpm)
2732 err = kstrtoint(buf, 10, &value);
2737 pwm_mode = AMD_FAN_CTRL_NONE;
2738 else if (value == 1)
2739 pwm_mode = AMD_FAN_CTRL_MANUAL;
2740 else if (value == 2)
2741 pwm_mode = AMD_FAN_CTRL_AUTO;
2745 ret = pm_runtime_resume_and_get(adev->dev);
2749 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2751 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2752 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2760 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2761 struct device_attribute *attr,
2764 return sysfs_emit(buf, "%i\n", 0);
2767 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2768 struct device_attribute *attr,
2771 return sysfs_emit(buf, "%i\n", 255);
2774 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2775 struct device_attribute *attr,
2776 const char *buf, size_t count)
2778 struct amdgpu_device *adev = dev_get_drvdata(dev);
2783 if (amdgpu_in_reset(adev))
2785 if (adev->in_suspend && !adev->in_runpm)
2788 err = kstrtou32(buf, 10, &value);
2792 err = pm_runtime_resume_and_get(adev->dev);
2796 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2800 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2801 pr_info("manual fan speed control should be enabled first\n");
2806 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2809 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2810 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2818 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2819 struct device_attribute *attr,
2822 struct amdgpu_device *adev = dev_get_drvdata(dev);
2826 if (amdgpu_in_reset(adev))
2828 if (adev->in_suspend && !adev->in_runpm)
2831 err = pm_runtime_get_if_active(adev->dev);
2833 return err ?: -EPERM;
2835 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2837 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2842 return sysfs_emit(buf, "%i\n", speed);
2845 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2846 struct device_attribute *attr,
2849 struct amdgpu_device *adev = dev_get_drvdata(dev);
2853 if (amdgpu_in_reset(adev))
2855 if (adev->in_suspend && !adev->in_runpm)
2858 err = pm_runtime_get_if_active(adev->dev);
2860 return err ?: -EPERM;
2862 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2864 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2869 return sysfs_emit(buf, "%i\n", speed);
2872 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2873 struct device_attribute *attr,
2876 struct amdgpu_device *adev = dev_get_drvdata(dev);
2880 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2886 return sysfs_emit(buf, "%d\n", min_rpm);
2889 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2890 struct device_attribute *attr,
2893 struct amdgpu_device *adev = dev_get_drvdata(dev);
2897 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2903 return sysfs_emit(buf, "%d\n", max_rpm);
2906 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2907 struct device_attribute *attr,
2910 struct amdgpu_device *adev = dev_get_drvdata(dev);
2914 if (amdgpu_in_reset(adev))
2916 if (adev->in_suspend && !adev->in_runpm)
2919 err = pm_runtime_get_if_active(adev->dev);
2921 return err ?: -EPERM;
2923 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2925 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2930 return sysfs_emit(buf, "%i\n", rpm);
2933 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2934 struct device_attribute *attr,
2935 const char *buf, size_t count)
2937 struct amdgpu_device *adev = dev_get_drvdata(dev);
2942 if (amdgpu_in_reset(adev))
2944 if (adev->in_suspend && !adev->in_runpm)
2947 err = kstrtou32(buf, 10, &value);
2951 err = pm_runtime_resume_and_get(adev->dev);
2955 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2959 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2964 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2967 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2968 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2976 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2977 struct device_attribute *attr,
2980 struct amdgpu_device *adev = dev_get_drvdata(dev);
2984 if (amdgpu_in_reset(adev))
2986 if (adev->in_suspend && !adev->in_runpm)
2989 ret = pm_runtime_get_if_active(adev->dev);
2991 return ret ?: -EPERM;
2993 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2995 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3000 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3003 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3004 struct device_attribute *attr,
3008 struct amdgpu_device *adev = dev_get_drvdata(dev);
3013 if (amdgpu_in_reset(adev))
3015 if (adev->in_suspend && !adev->in_runpm)
3018 err = kstrtoint(buf, 10, &value);
3023 pwm_mode = AMD_FAN_CTRL_AUTO;
3024 else if (value == 1)
3025 pwm_mode = AMD_FAN_CTRL_MANUAL;
3029 err = pm_runtime_resume_and_get(adev->dev);
3033 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3035 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3036 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3044 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3045 struct device_attribute *attr,
3048 struct amdgpu_device *adev = dev_get_drvdata(dev);
3052 /* get the voltage */
3053 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3058 return sysfs_emit(buf, "%d\n", vddgfx);
3061 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3062 struct device_attribute *attr,
3065 return sysfs_emit(buf, "vddgfx\n");
3068 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3069 struct device_attribute *attr,
3072 struct amdgpu_device *adev = dev_get_drvdata(dev);
3076 /* only APUs have vddnb */
3077 if (!(adev->flags & AMD_IS_APU))
3080 /* get the voltage */
3081 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3086 return sysfs_emit(buf, "%d\n", vddnb);
3089 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3090 struct device_attribute *attr,
3093 return sysfs_emit(buf, "vddnb\n");
3096 static int amdgpu_hwmon_get_power(struct device *dev,
3097 enum amd_pp_sensors sensor)
3099 struct amdgpu_device *adev = dev_get_drvdata(dev);
3104 r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
3108 /* convert to microwatts */
3109 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3114 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3115 struct device_attribute *attr,
3120 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3124 return sysfs_emit(buf, "%zd\n", val);
3127 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3128 struct device_attribute *attr,
3133 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3137 return sysfs_emit(buf, "%zd\n", val);
3140 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3141 struct device_attribute *attr,
3143 enum pp_power_limit_level pp_limit_level)
3145 struct amdgpu_device *adev = dev_get_drvdata(dev);
3146 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3151 if (amdgpu_in_reset(adev))
3153 if (adev->in_suspend && !adev->in_runpm)
3156 r = pm_runtime_get_if_active(adev->dev);
3160 r = amdgpu_dpm_get_power_limit(adev, &limit,
3161 pp_limit_level, power_type);
3164 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3166 size = sysfs_emit(buf, "\n");
3168 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3173 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3174 struct device_attribute *attr,
3177 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3180 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3181 struct device_attribute *attr,
3184 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3188 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3189 struct device_attribute *attr,
3192 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3196 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3197 struct device_attribute *attr,
3200 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3204 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3205 struct device_attribute *attr,
3208 struct amdgpu_device *adev = dev_get_drvdata(dev);
3209 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3211 if (gc_ver == IP_VERSION(10, 3, 1))
3212 return sysfs_emit(buf, "%s\n",
3213 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3214 "fastPPT" : "slowPPT");
3216 return sysfs_emit(buf, "PPT\n");
3219 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3220 struct device_attribute *attr,
3224 struct amdgpu_device *adev = dev_get_drvdata(dev);
3225 int limit_type = to_sensor_dev_attr(attr)->index;
3229 if (amdgpu_in_reset(adev))
3231 if (adev->in_suspend && !adev->in_runpm)
3234 if (amdgpu_sriov_vf(adev))
3237 err = kstrtou32(buf, 10, &value);
3241 value = value / 1000000; /* convert to Watt */
3242 value |= limit_type << 24;
3244 err = pm_runtime_resume_and_get(adev->dev);
3248 err = amdgpu_dpm_set_power_limit(adev, value);
3250 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3251 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3259 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3260 struct device_attribute *attr,
3263 struct amdgpu_device *adev = dev_get_drvdata(dev);
3268 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3273 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3276 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3277 struct device_attribute *attr,
3280 return sysfs_emit(buf, "sclk\n");
3283 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3284 struct device_attribute *attr,
3287 struct amdgpu_device *adev = dev_get_drvdata(dev);
3292 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3297 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3300 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3301 struct device_attribute *attr,
3304 return sysfs_emit(buf, "mclk\n");
3310 * The amdgpu driver exposes the following sensor interfaces:
3312 * - GPU temperature (via the on-die sensor)
3316 * - Northbridge voltage (APUs only)
3322 * - GPU gfx/compute engine clock
3324 * - GPU memory clock (dGPU only)
3326 * hwmon interfaces for GPU temperature:
3328 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3329 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3331 * - temp[1-3]_label: temperature channel label
3332 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3334 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3335 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3337 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3338 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3340 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3341 * - these are supported on SOC15 dGPUs only
3343 * hwmon interfaces for GPU voltage:
3345 * - in0_input: the voltage on the GPU in millivolts
3347 * - in1_input: the voltage on the Northbridge in millivolts
3349 * hwmon interfaces for GPU power:
3351 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3353 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3355 * - power1_cap_min: minimum cap supported in microWatts
3357 * - power1_cap_max: maximum cap supported in microWatts
3359 * - power1_cap: selected power cap in microWatts
3361 * hwmon interfaces for GPU fan:
3363 * - pwm1: pulse width modulation fan level (0-255)
3365 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3367 * - pwm1_min: pulse width modulation fan control minimum level (0)
3369 * - pwm1_max: pulse width modulation fan control maximum level (255)
3371 * - fan1_min: minimum fan speed, in revolutions per minute (RPM)
3373 * - fan1_max: maximum fan speed, in revolutions per minute (RPM)
3375 * - fan1_input: fan speed in RPM
3377 * - fan[1-\*]_target: Desired fan speed, in revolutions per minute (RPM)
3379 * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
3381 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3382 * Doing so will cause the earlier setting to be overridden.
3384 * hwmon interfaces for GPU clocks:
3386 * - freq1_input: the gfx/compute clock in hertz
3388 * - freq2_input: the memory clock in hertz
3390 * You can use hwmon tools like sensors to view this information on your system.
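 *
 * As a brief illustration (a sketch only; the hwmon instance number below is
 * illustrative and differs between systems), the edge temperature and average
 * power can be read, and a manual fan speed applied, with:
 *
 * .. code-block:: console
 *
 *	cat /sys/class/hwmon/hwmon0/temp1_input
 *	cat /sys/class/hwmon/hwmon0/power1_average
 *	echo "1" > /sys/class/hwmon/hwmon0/pwm1_enable
 *	echo "128" > /sys/class/hwmon/hwmon0/pwm1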
3394 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3395 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3396 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3397 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3398 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3399 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3400 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3401 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3402 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3403 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3404 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3405 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3406 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3407 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3408 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3409 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3410 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3411 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3412 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3413 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3414 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3415 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3416 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3417 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3418 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3419 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3420 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3421 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3422 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3423 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3424 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3425 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3426 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3427 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3428 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3429 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3430 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3431 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3432 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3433 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3434 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3435 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3436 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3437 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3438 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3440 static struct attribute *hwmon_attributes[] = {
3441 &sensor_dev_attr_temp1_input.dev_attr.attr,
3442 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3443 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3444 &sensor_dev_attr_temp2_input.dev_attr.attr,
3445 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3446 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3447 &sensor_dev_attr_temp3_input.dev_attr.attr,
3448 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3449 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3450 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3451 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3452 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3453 &sensor_dev_attr_temp1_label.dev_attr.attr,
3454 &sensor_dev_attr_temp2_label.dev_attr.attr,
3455 &sensor_dev_attr_temp3_label.dev_attr.attr,
3456 &sensor_dev_attr_pwm1.dev_attr.attr,
3457 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3458 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3459 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3460 &sensor_dev_attr_fan1_input.dev_attr.attr,
3461 &sensor_dev_attr_fan1_min.dev_attr.attr,
3462 &sensor_dev_attr_fan1_max.dev_attr.attr,
3463 &sensor_dev_attr_fan1_target.dev_attr.attr,
3464 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3465 &sensor_dev_attr_in0_input.dev_attr.attr,
3466 &sensor_dev_attr_in0_label.dev_attr.attr,
3467 &sensor_dev_attr_in1_input.dev_attr.attr,
3468 &sensor_dev_attr_in1_label.dev_attr.attr,
3469 &sensor_dev_attr_power1_average.dev_attr.attr,
3470 &sensor_dev_attr_power1_input.dev_attr.attr,
3471 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3472 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3473 &sensor_dev_attr_power1_cap.dev_attr.attr,
3474 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3475 &sensor_dev_attr_power1_label.dev_attr.attr,
3476 &sensor_dev_attr_power2_average.dev_attr.attr,
3477 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3478 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3479 &sensor_dev_attr_power2_cap.dev_attr.attr,
3480 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3481 &sensor_dev_attr_power2_label.dev_attr.attr,
3482 &sensor_dev_attr_freq1_input.dev_attr.attr,
3483 &sensor_dev_attr_freq1_label.dev_attr.attr,
3484 &sensor_dev_attr_freq2_input.dev_attr.attr,
3485 &sensor_dev_attr_freq2_label.dev_attr.attr,
3489 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3490 struct attribute *attr, int index)
3492 struct device *dev = kobj_to_dev(kobj);
3493 struct amdgpu_device *adev = dev_get_drvdata(dev);
3494 umode_t effective_mode = attr->mode;
3495 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3498 /* under pp one vf mode, management of hwmon attributes is not supported */
3499 if (amdgpu_sriov_is_pp_one_vf(adev))
3500 effective_mode &= ~S_IWUSR;
3502 /* Skip fan attributes if fan is not present */
3503 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3504 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3505 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3506 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3507 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3508 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3509 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3510 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3511 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3514 /* Skip fan attributes on APU */
3515 if ((adev->flags & AMD_IS_APU) &&
3516 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3517 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3518 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3519 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3520 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3521 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3522 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3523 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3524 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3527 /* Skip crit temp on APU */
3528 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3529 (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
3530 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3531 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3534 /* Skip limit attributes if DPM is not enabled */
3535 if (!adev->pm.dpm_enabled &&
3536 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3537 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3538 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3539 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3540 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3541 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3542 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3543 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3544 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3545 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3546 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3549 /* mask fan attributes if we have no bindings for this asic to expose */
3550 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3551 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3552 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3553 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3554 effective_mode &= ~S_IRUGO;
3556 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3557 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3558 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3559 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3560 effective_mode &= ~S_IWUSR;
3562 /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
3563 if (((adev->family == AMDGPU_FAMILY_SI) ||
3564 ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3565 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
3566 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3567 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3568 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3569 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3572 /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
3573 if (((adev->family == AMDGPU_FAMILY_SI) ||
3574 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3575 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3578 /* not all products support both average and instantaneous */
3579 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3580 amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
3582 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3583 amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
3586 /* hide max/min values if we can't both query and manage the fan */
3587 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3588 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3589 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3590 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3591 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3592 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3595 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3596 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3597 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3598 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3601 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3602 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3603 (gc_ver == IP_VERSION(9, 4, 3) ||
3604 gc_ver == IP_VERSION(9, 4, 4))) &&
3605 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3606 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3609 /* only APUs other than gc 9.4.3 have vddnb */
3610 if ((!(adev->flags & AMD_IS_APU) ||
3611 (gc_ver == IP_VERSION(9, 4, 3) ||
3612 gc_ver == IP_VERSION(9, 4, 4))) &&
3613 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3614 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3617 /* no mclk on APUs other than gc 9.4.3 */
3618 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3619 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3620 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3623 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3624 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3625 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3626 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3627 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3628 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3629 attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3630 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3633 /* hotspot temperature for gc 9.4.3 */
3634 if (gc_ver == IP_VERSION(9, 4, 3) ||
3635 gc_ver == IP_VERSION(9, 4, 4)) {
3636 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3637 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3638 attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3641 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3642 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3646 /* only SOC15 dGPUs support hotspot and mem temperatures */
3647 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3648 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3649 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3650 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3651 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3652 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3655 /* only Vangogh has fast PPT limit and power labels */
3656 if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3657 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3658 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3659 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3660 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3661 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3662 attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3665 return effective_mode;
3668 static const struct attribute_group hwmon_attrgroup = {
3669 .attrs = hwmon_attributes,
3670 .is_visible = hwmon_attributes_visible,
3673 static const struct attribute_group *hwmon_groups[] = {
3678 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3679 enum pp_clock_type od_type,
3685 if (amdgpu_in_reset(adev))
3687 if (adev->in_suspend && !adev->in_runpm)
3690 ret = pm_runtime_get_if_active(adev->dev);
3692 return ret ?: -EPERM;
3694 size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3696 size = sysfs_emit(buf, "\n");
3698 pm_runtime_put_autosuspend(adev->dev);
3703 static int parse_input_od_command_lines(const char *buf,
3707 uint32_t *num_of_params)
3709 const char delimiter[3] = {' ', '\n', '\0'};
3710 uint32_t parameter_size = 0;
3711 char buf_cpy[128] = {0};
3712 char *tmp_str, *sub_str;
3715 if (count > sizeof(buf_cpy) - 1)
3718 memcpy(buf_cpy, buf, count);
3721 /* skip leading spaces */
3722 while (isspace(*tmp_str))
3727 *type = PP_OD_COMMIT_DPM_TABLE;
3730 params[parameter_size] = *type;
3732 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3738 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3739 if (strlen(sub_str) == 0)
3742 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3747 while (isspace(*tmp_str))
3751 *num_of_params = parameter_size;
3757 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3758 enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3762 uint32_t parameter_size = 0;
3766 if (amdgpu_in_reset(adev))
3768 if (adev->in_suspend && !adev->in_runpm)
3771 ret = parse_input_od_command_lines(in_buf,
3779 ret = pm_runtime_resume_and_get(adev->dev);
3783 ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3790 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3791 ret = amdgpu_dpm_dispatch_task(adev,
3792 AMD_PP_TASK_READJUST_POWER_STATE,
3798 pm_runtime_mark_last_busy(adev->dev);
3799 pm_runtime_put_autosuspend(adev->dev);
3804 pm_runtime_mark_last_busy(adev->dev);
3805 pm_runtime_put_autosuspend(adev->dev);
3813 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3814 * control curve line.
3816 * Reading back the file shows you the current settings (temperature in degrees
3817 * Celsius and fan speed in PWM) applied to every anchor point of the curve line
3818 * and their permitted ranges if changeable.
3820 * Writing a string in the format "anchor_point_index temperature
3821 * fan_speed_in_pwm" to the file changes the settings for that specific anchor
3822 * point accordingly.
3824 * When you have finished editing, write "c" (commit) to the file to commit your changes.
3827 * If you want to reset to the default values, write "r" (reset) to the file to restore them.
3830 * Two fan control modes are supported: auto and manual. In auto mode, the
3831 * PMFW handles fan speed control (how fan speed reacts to ASIC temperature).
3832 * In manual mode, users can set their own fan curve line as described here.
3833 * Normally the ASIC boots up in auto mode. Any setting made
3834 * via this interface will switch the fan control to manual mode implicitly.
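 *
 * As an illustration (the gpu_od sub-directory name "fan_ctrl" and the values
 * below are assumptions for the example, not guaranteed names; read the file
 * first to learn the permitted ranges), anchor point 2 could be set to
 * 75 degrees Celsius at PWM 150 and then committed with:
 *
 * .. code-block:: console
 *
 *	echo "2 75 150" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_curve
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_curve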
3837 static ssize_t fan_curve_show(struct kobject *kobj,
3838 struct kobj_attribute *attr,
3841 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3842 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3844 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
3847 static ssize_t fan_curve_store(struct kobject *kobj,
3848 struct kobj_attribute *attr,
3852 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3853 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3855 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3856 PP_OD_EDIT_FAN_CURVE,
3861 static umode_t fan_curve_visible(struct amdgpu_device *adev)
3863 umode_t umode = 0000;
3865 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
3866 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3868 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
3875 * DOC: acoustic_limit_rpm_threshold
3877 * The amdgpu driver provides a sysfs API for checking and adjusting the
3878 * acoustic limit in RPM for fan control.
3880 * Reading back the file shows you the current setting and the permitted
3881 * ranges if changeable.
3883 * Writing an integer to the file changes the setting accordingly.
3885 * When you have finished editing, write "c" (commit) to the file to commit your changes.
3888 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
3891 * This setting works under auto fan control mode only. It adjusts the PMFW's
3892 * limit on the maximum speed in RPM the fan can spin. Setting a value via this
3893 * interface will switch the fan control to auto mode implicitly.
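 *
 * A short illustrative sequence (the "fan_ctrl" sub-directory name and the RPM
 * value are assumptions for the example; the permitted range is reported by
 * reading the file first):
 *
 * .. code-block:: console
 *
 *	echo "3200" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/acoustic_limit_rpm_threshold
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/acoustic_limit_rpm_threshold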
3895 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
3896 struct kobj_attribute *attr,
3899 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3900 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3902 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
3905 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
3906 struct kobj_attribute *attr,
3910 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3911 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3913 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3914 PP_OD_EDIT_ACOUSTIC_LIMIT,
3919 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
3921 umode_t umode = 0000;
3923 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
3924 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3926 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
3933 * DOC: acoustic_target_rpm_threshold
3935 * The amdgpu driver provides a sysfs API for checking and adjusting the
3936 * acoustic target in RPM for fan control.
3938 * Reading back the file shows you the current setting and the permitted
3939 * ranges if changeable.
3941 * Writing an integer to the file changes the setting accordingly.
3943 * When you have finished editing, write "c" (commit) to the file to commit your changes.
3946 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
3949 * This setting works under auto fan control mode only. It can coexist with
3950 * other settings that also work under auto mode. It adjusts the PMFW's
3951 * limit on the maximum speed in RPM the fan can spin while the ASIC
3952 * temperature is not greater than the target temperature. Setting a value via this
3953 * interface will switch the fan control to auto mode implicitly.
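 *
 * For example (the "fan_ctrl" sub-directory name and the RPM value below are
 * illustrative assumptions), the current setting can be inspected, changed and
 * committed with:
 *
 * .. code-block:: console
 *
 *	cat /sys/bus/pci/devices/.../gpu_od/fan_ctrl/acoustic_target_rpm_threshold
 *	echo "2000" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/acoustic_target_rpm_threshold
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/acoustic_target_rpm_threshold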
3955 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
3956 struct kobj_attribute *attr,
3959 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3960 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3962 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
3965 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
3966 struct kobj_attribute *attr,
3970 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3971 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3973 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3974 PP_OD_EDIT_ACOUSTIC_TARGET,
3979 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
3981 umode_t umode = 0000;
3983 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
3984 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3986 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
3993 * DOC: fan_target_temperature
3995 * The amdgpu driver provides a sysfs API for checking and adjusting the
3996 * target temperature in degrees Celsius for fan control.
3998 * Reading back the file shows you the current setting and the permitted
3999 * ranges if changeable.
4001 * Writing an integer to the file changes the setting accordingly.
4003 * When you have finished editing, write "c" (commit) to the file to commit your changes.
4006 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
4009 * This setting works under auto fan control mode only. It can coexist with
4010 * other settings that also work under auto mode. Paired with the
4011 * acoustic_target_rpm_threshold setting, it defines the maximum speed in
4012 * RPM the fan can spin while the ASIC temperature is not greater than the target
4013 * temperature. Setting a value via this interface will switch the fan control to
4014 * auto mode implicitly.
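 *
 * For example (the "fan_ctrl" sub-directory name and the temperature value are
 * illustrative assumptions; the permitted range is reported by reading the file):
 *
 * .. code-block:: console
 *
 *	echo "85" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_target_temperature
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_target_temperature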
4016 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4017 struct kobj_attribute *attr,
4020 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4021 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4023 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4026 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4027 struct kobj_attribute *attr,
4031 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4032 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4034 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4035 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4040 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4042 umode_t umode = 0000;
4044 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4045 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4047 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4054 * DOC: fan_minimum_pwm
4056 * The amdgpu driver provides a sysfs API for checking and adjusting the
4057 * minimum fan speed in PWM.
4059 * Reading back the file shows you the current setting and the permitted
4060 * ranges if changeable.
4062 * Writing an integer to the file changes the setting accordingly.
4064 * When you have finished editing, write "c" (commit) to the file to commit your changes.
4067 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
4070 * This setting works under auto fan control mode only. It can coexist with
4071 * other settings that also work under auto mode. It adjusts the PMFW's
4072 * floor on the minimum fan speed in PWM at which the fan should spin. Setting a value
4073 * via this interface will switch the fan control to auto mode implicitly.
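 *
 * For example (the "fan_ctrl" sub-directory name and the PWM value are
 * illustrative assumptions; valid values are reported by reading the file):
 *
 * .. code-block:: console
 *
 *	echo "40" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_minimum_pwm
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_minimum_pwm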
4075 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4076 struct kobj_attribute *attr,
4079 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4080 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4082 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4085 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4086 struct kobj_attribute *attr,
4090 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4091 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4093 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4094 PP_OD_EDIT_FAN_MINIMUM_PWM,
4099 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4101 umode_t umode = 0000;
4103 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4104 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4106 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4113 * DOC: fan_zero_rpm_enable
4115 * The amdgpu driver provides a sysfs API for checking and adjusting the zero RPM feature.
4118 * Reading back the file shows you the current setting and the permitted
4119 * ranges if changeable.
4121 * Writing an integer to the file changes the setting accordingly.
4123 * When you have finished editing, write "c" (commit) to the file to commit your changes.
4126 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
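 *
 * For example (the "fan_ctrl" sub-directory name is an illustrative assumption,
 * and "1" is assumed to enable the feature per the permitted range reported by
 * reading the file), zero RPM could be enabled and committed with:
 *
 * .. code-block:: console
 *
 *	echo "1" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_zero_rpm_enable
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_zero_rpm_enable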
4129 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4130 struct kobj_attribute *attr,
4133 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4134 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4136 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4139 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4140 struct kobj_attribute *attr,
4144 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4145 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4147 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4148 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4153 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4155 umode_t umode = 0000;
4157 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4158 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4160 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4167 * DOC: fan_zero_rpm_stop_temperature
4169 * The amdgpu driver provides a sysfs API for checking and adjusting the
4170 * zero RPM stop temperature feature.
4172 * Reading back the file shows you the current setting and the permitted
4173 * ranges if changeable.
4175 * Writing an integer to the file changes the setting accordingly.
4177 * When you have finished editing, write "c" (commit) to the file to commit your changes.
4180 * If you want to reset to the default value, write "r" (reset) to the file to restore it.
4183 * This setting works only if the Zero RPM setting is enabled. It adjusts the
4184 * temperature below which the fan can stop.
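 *
 * For example (the "fan_ctrl" sub-directory name and the temperature value are
 * illustrative assumptions), the stop temperature could be set and committed with:
 *
 * .. code-block:: console
 *
 *	echo "50" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature
 *	echo "c" > /sys/bus/pci/devices/.../gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature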
4186 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4187 struct kobj_attribute *attr,
4190 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4191 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4193 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4196 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4197 struct kobj_attribute *attr,
4201 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4202 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4204 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4205 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4210 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4212 umode_t umode = 0000;
4214 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4215 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4217 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4223 static struct od_feature_set amdgpu_od_set = {
4229 .name = "fan_curve",
4231 .is_visible = fan_curve_visible,
4232 .show = fan_curve_show,
4233 .store = fan_curve_store,
4237 .name = "acoustic_limit_rpm_threshold",
4239 .is_visible = acoustic_limit_threshold_visible,
4240 .show = acoustic_limit_threshold_show,
4241 .store = acoustic_limit_threshold_store,
4245 .name = "acoustic_target_rpm_threshold",
4247 .is_visible = acoustic_target_threshold_visible,
4248 .show = acoustic_target_threshold_show,
4249 .store = acoustic_target_threshold_store,
4253 .name = "fan_target_temperature",
4255 .is_visible = fan_target_temperature_visible,
4256 .show = fan_target_temperature_show,
4257 .store = fan_target_temperature_store,
4261 .name = "fan_minimum_pwm",
4263 .is_visible = fan_minimum_pwm_visible,
4264 .show = fan_minimum_pwm_show,
4265 .store = fan_minimum_pwm_store,
4269 .name = "fan_zero_rpm_enable",
4271 .is_visible = fan_zero_rpm_enable_visible,
4272 .show = fan_zero_rpm_enable_show,
4273 .store = fan_zero_rpm_enable_store,
4277 .name = "fan_zero_rpm_stop_temperature",
4279 .is_visible = fan_zero_rpm_stop_temp_visible,
4280 .show = fan_zero_rpm_stop_temp_show,
4281 .store = fan_zero_rpm_stop_temp_store,
4289 static void od_kobj_release(struct kobject *kobj)
4291 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4296 static const struct kobj_type od_ktype = {
4297 .release = od_kobj_release,
4298 .sysfs_ops = &kobj_sysfs_ops,
4301 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4303 struct od_kobj *container, *container_next;
4304 struct od_attribute *attribute, *attribute_next;
4306 if (list_empty(&adev->pm.od_kobj_list))
4309 list_for_each_entry_safe(container, container_next,
4310 &adev->pm.od_kobj_list, entry) {
4311 list_del(&container->entry);
4313 list_for_each_entry_safe(attribute, attribute_next,
4314 &container->attribute, entry) {
4315 list_del(&attribute->entry);
4316 sysfs_remove_file(&container->kobj,
4317 &attribute->attribute.attr);
4321 kobject_put(&container->kobj);
4325 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4326 struct od_feature_ops *feature_ops)
4330 if (!feature_ops->is_visible)
4334 * If the feature has neither user read nor write mode set,
4335 * we can assume the feature is actually not supported
4336 * and the relevant sysfs interface should not be exposed.
4338 mode = feature_ops->is_visible(adev);
4339 if (mode & (S_IRUSR | S_IWUSR))
4345 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4346 struct od_feature_container *container)
4351 * If there is no valid entry within the container, the container
4352 * is recognized as a self-contained container. A valid entry
4353 * here means one that has a valid name and is visible/supported by the ASIC.
4356 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4357 if (container->sub_feature[i].name &&
4358 amdgpu_is_od_feature_supported(adev,
4359 &container->sub_feature[i].ops))
4366 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4368 struct od_kobj *top_set, *sub_set;
4369 struct od_attribute *attribute;
4370 struct od_feature_container *container;
4371 struct od_feature_item *feature;
4375 /* Setup the top `gpu_od` directory which holds all other OD interfaces */
4376 top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4379 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4381 ret = kobject_init_and_add(&top_set->kobj,
4388 INIT_LIST_HEAD(&top_set->attribute);
4389 top_set->priv = adev;
4391 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4392 container = &amdgpu_od_set.containers[i];
4394 if (!container->name)
4398 * If there are valid entries within the container, the container
4399 * will be presented as a sub-directory and all the entries it holds
4400 * will be presented as plain files under it.
4401 * If there is no valid entry within the container, the container
4402 * itself will be presented as a plain file under the top `gpu_od` directory.
4404 if (amdgpu_od_is_self_contained(adev, container)) {
4405 if (!amdgpu_is_od_feature_supported(adev,
4410 * The container is presented as a plain file under top `gpu_od`
4413 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4418 list_add(&attribute->entry, &top_set->attribute);
4420 attribute->attribute.attr.mode =
4421 container->ops.is_visible(adev);
4422 attribute->attribute.attr.name = container->name;
4423 attribute->attribute.show =
4424 container->ops.show;
4425 attribute->attribute.store =
4426 container->ops.store;
4427 ret = sysfs_create_file(&top_set->kobj,
4428 &attribute->attribute.attr);
4432 /* The container is presented as a sub directory. */
4433 sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4438 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4440 ret = kobject_init_and_add(&sub_set->kobj,
4447 INIT_LIST_HEAD(&sub_set->attribute);
4448 sub_set->priv = adev;
4450 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4451 feature = &container->sub_feature[j];
4455 if (!amdgpu_is_od_feature_supported(adev,
4460 * With the container presented as a sub directory, the entry within
4461 * it is presented as a plain file under the sub directory.
4463 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4468 list_add(&attribute->entry, &sub_set->attribute);
4470 attribute->attribute.attr.mode =
4471 feature->ops.is_visible(adev);
4472 attribute->attribute.attr.name = feature->name;
4473 attribute->attribute.show =
4475 attribute->attribute.store =
4477 ret = sysfs_create_file(&sub_set->kobj,
4478 &attribute->attribute.attr);
4486 * If gpu_od is the only member in the list, that means gpu_od is an
4487 * empty directory, so remove it.
4489 if (list_is_singular(&adev->pm.od_kobj_list))
4495 amdgpu_od_set_fini(adev);
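
/*
 * amdgpu_pm_sysfs_init - register the power management sysfs interfaces
 *
 * Registers the hwmon device (except in SR-IOV multi-VF mode), creates the
 * device attribute groups permitted for the current SR-IOV mode, and sets up
 * the overdrive and power-policy interfaces when they are supported.
 */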
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;
	uint32_t mask = 0;
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	if (adev->pm.dpm_enabled == 0)
		return 0;

	mode = amdgpu_virt_get_sriov_vf_mode(adev);

	/* Under multi-VF mode, none of the hwmon attributes are supported. */
	if (mode != SRIOV_VF_MODE_MULTI_VF) {
		adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
									    DRIVER_NAME, adev,
									    hwmon_groups);
		if (IS_ERR(adev->pm.int_hwmon_dev)) {
			ret = PTR_ERR(adev->pm.int_hwmon_dev);
			dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
			return ret;
		}
	}

	switch (mode) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		goto err_out0;

	if (amdgpu_dpm_is_overdrive_supported(adev)) {
		ret = amdgpu_od_set_init(adev);
		if (ret)
			goto err_out1;
	} else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
		dev_info(adev->dev, "overdrive feature is not supported\n");
	}

	if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
	    -EOPNOTSUPP) {
		ret = devm_device_add_group(adev->dev,
					    &amdgpu_pm_policy_attr_group);
		if (ret)
			goto err_out0;
	}

	adev->pm.sysfs_initialized = true;

	return 0;

err_out1:
	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
err_out0:
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	return ret;
}
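
/*
 * amdgpu_pm_sysfs_fini - tear down everything created by
 * amdgpu_pm_sysfs_init(): the overdrive hierarchy, the hwmon device and the
 * device attribute groups.
 */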
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	amdgpu_od_set_fini(adev);

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;
	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);

	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);

		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
			for (i = 0; i < num_cpu_cores; i++)
				seq_printf(m, "\t%u MHz (CPU%d)\n",
					   *(p_val + i), i);
		}

		kfree(p_val);
	}
}
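
/*
 * amdgpu_debugfs_pm_info_pp - dump the current clock, voltage, power,
 * temperature and load figures, plus the VCN/UVD/VCE power state, via the
 * powerplay sensor interface.
 */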
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	uint32_t value;
	uint64_t value64 = 0;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");

	amdgpu_debugfs_prints_cpu_info(m, adev);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
		if (adev->flags & AMD_IS_APU)
			seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
		else
			seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
	}
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
		if (adev->flags & AMD_IS_APU)
			seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
		else
			seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
	}
	size = sizeof(value);
	seq_printf(m, "\n");
	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);
	/* VCN Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
		seq_printf(m, "VCN Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
	/* ASICs newer than CHIP_VEGA20 support these sensors */
	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Powered down\n");
			} else {
				seq_printf(m, "VCN: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Powered down\n");
			} else {
				seq_printf(m, "UVD: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Powered down\n");
			} else {
				seq_printf(m, "VCE: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

/* Print one "name: On/Off" line per known clock gating flag. */
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}
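
/*
 * amdgpu_debugfs_pm_info_show - seq_file show callback for the amdgpu_pm_info
 * debugfs file: resumes the device, prints the current performance level
 * (falling back to the generic powerplay sensor dump when that is not
 * supported) and then the decoded clock gating flags.
 */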
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	u64 flags = 0;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_resume_and_get(dev->dev);
	if (r < 0)
		return r;

	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}

static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif
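
/*
 * amdgpu_debugfs_pm_init - create the power management debugfs entries
 * (amdgpu_pm_info, optionally amdgpu_pm_prv_buffer, and the SMU STB entries
 * via amdgpu_dpm_stb_debug_fs_init()) under the primary DRM minor's debugfs
 * root.
 */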
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}