2 * Copyright 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <drm/drm_debugfs.h>
29 #include "amdgpu_drv.h"
30 #include "amdgpu_pm.h"
31 #include "amdgpu_dpm.h"
32 #include "amdgpu_smu.h"
34 #include <linux/pci.h>
35 #include <linux/hwmon.h>
36 #include <linux/hwmon-sysfs.h>
37 #include <linux/nospec.h>
38 #include <linux/pm_runtime.h>
41 static const struct cg_flag_name clocks[] = {
42 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
43 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
44 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
45 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
46 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
47 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
48 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
49 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
50 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
51 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
52 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
53 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
54 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
55 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
56 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
57 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
58 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
59 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
60 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
61 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
62 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
63 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
64 {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
65 {AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
66 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
67 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
68 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
69 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
70 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
71 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
73 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
74 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
78 static const struct hwmon_temp_label {
79 enum PP_HWMON_TEMP channel;
82 {PP_TEMP_EDGE, "edge"},
83 {PP_TEMP_JUNCTION, "junction"},
88 * DOC: power_dpm_state
90 * The power_dpm_state file is a legacy interface and is only provided for
91 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
92 * certain power-related parameters. The file power_dpm_state is used for this.
93 * It accepts the following arguments:
103 * On older GPUs, the vbios provided a special power state for battery
104 * operation. Selecting battery switched to this state. This is no
105 * longer provided on newer GPUs so the option does nothing in that case.
109 * On older GPUs, the vbios provided a special power state for balanced
110 * operation. Selecting balanced switched to this state. This is no
111 * longer provided on newer GPUs so the option does nothing in that case.
115 * On older GPUs, the vbios provided a special power state for performance
116 * operation. Selecting performance switched to this state. This is no
117 * longer provided on newer GPUs so the option does nothing in that case.
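 *
 * For example, to check the current state and request the balanced state
 * (the sysfs path is illustrative; the card index varies by system):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/power_dpm_state
 *    echo balanced > /sys/class/drm/card0/device/power_dpm_state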
121 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
122 struct device_attribute *attr,
125 struct drm_device *ddev = dev_get_drvdata(dev);
126 struct amdgpu_device *adev = drm_to_adev(ddev);
127 enum amd_pm_state_type pm;
130 if (amdgpu_in_reset(adev))
133 ret = pm_runtime_get_sync(ddev->dev);
135 pm_runtime_put_autosuspend(ddev->dev);
139 if (is_support_sw_smu(adev)) {
140 if (adev->smu.ppt_funcs->get_current_power_state)
141 pm = smu_get_current_power_state(&adev->smu);
143 pm = adev->pm.dpm.user_state;
144 } else if (adev->powerplay.pp_funcs->get_current_power_state) {
145 pm = amdgpu_dpm_get_current_power_state(adev);
147 pm = adev->pm.dpm.user_state;
150 pm_runtime_mark_last_busy(ddev->dev);
151 pm_runtime_put_autosuspend(ddev->dev);
153 return snprintf(buf, PAGE_SIZE, "%s\n",
154 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
155 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
158 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
159 struct device_attribute *attr,
163 struct drm_device *ddev = dev_get_drvdata(dev);
164 struct amdgpu_device *adev = drm_to_adev(ddev);
165 enum amd_pm_state_type state;
168 if (amdgpu_in_reset(adev))
171 if (strncmp("battery", buf, strlen("battery")) == 0)
172 state = POWER_STATE_TYPE_BATTERY;
173 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
174 state = POWER_STATE_TYPE_BALANCED;
175 else if (strncmp("performance", buf, strlen("performance")) == 0)
176 state = POWER_STATE_TYPE_PERFORMANCE;
180 ret = pm_runtime_get_sync(ddev->dev);
182 pm_runtime_put_autosuspend(ddev->dev);
186 if (is_support_sw_smu(adev)) {
187 mutex_lock(&adev->pm.mutex);
188 adev->pm.dpm.user_state = state;
189 mutex_unlock(&adev->pm.mutex);
190 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
191 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
193 mutex_lock(&adev->pm.mutex);
194 adev->pm.dpm.user_state = state;
195 mutex_unlock(&adev->pm.mutex);
197 amdgpu_pm_compute_clocks(adev);
199 pm_runtime_mark_last_busy(ddev->dev);
200 pm_runtime_put_autosuspend(ddev->dev);
207 * DOC: power_dpm_force_performance_level
209 * The amdgpu driver provides a sysfs API for adjusting certain power
210 * related parameters. The file power_dpm_force_performance_level is
211 * used for this. It accepts the following arguments:
231 * When auto is selected, the driver will attempt to dynamically select
232 * the optimal power profile for the current conditions.
236 * When low is selected, the clocks are forced to the lowest power state.
240 * When high is selected, the clocks are forced to the highest power state.
244 * When manual is selected, the user can manually adjust which power states
245 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
246 * and pp_dpm_pcie files and adjust the power state transition heuristics
247 * via the pp_power_profile_mode sysfs file.
254 * When one of the profiling modes is selected, clock and power gating are
255 * disabled and the clocks are set for different profiling cases. These
256 * modes are recommended for profiling specific workloads where you do
257 * not want clock or power gating, or the resulting clock fluctuations, to
258 * interfere with your results. profile_standard sets the clocks to a fixed
259 * clock level which varies from ASIC to ASIC. profile_min_sclk forces the
260 * sclk to the lowest level. profile_min_mclk forces the mclk to the lowest
261 * level. profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
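 *
 * For example, to pin the clocks for a profiling run and then return to
 * automatic management (the sysfs path is illustrative; the card index
 * varies by system):
 *
 * .. code-block:: bash
 *
 *    echo profile_peak > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    # ... run the workload being profiled ...
 *    echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level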
265 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
266 struct device_attribute *attr,
269 struct drm_device *ddev = dev_get_drvdata(dev);
270 struct amdgpu_device *adev = drm_to_adev(ddev);
271 enum amd_dpm_forced_level level = 0xff;
274 if (amdgpu_in_reset(adev))
277 ret = pm_runtime_get_sync(ddev->dev);
279 pm_runtime_put_autosuspend(ddev->dev);
283 if (is_support_sw_smu(adev))
284 level = smu_get_performance_level(&adev->smu);
285 else if (adev->powerplay.pp_funcs->get_performance_level)
286 level = amdgpu_dpm_get_performance_level(adev);
288 level = adev->pm.dpm.forced_level;
290 pm_runtime_mark_last_busy(ddev->dev);
291 pm_runtime_put_autosuspend(ddev->dev);
293 return snprintf(buf, PAGE_SIZE, "%s\n",
294 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
295 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
296 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
297 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
298 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
299 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
300 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
301 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
305 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
306 struct device_attribute *attr,
310 struct drm_device *ddev = dev_get_drvdata(dev);
311 struct amdgpu_device *adev = drm_to_adev(ddev);
312 enum amd_dpm_forced_level level;
313 enum amd_dpm_forced_level current_level = 0xff;
316 if (amdgpu_in_reset(adev))
319 if (strncmp("low", buf, strlen("low")) == 0) {
320 level = AMD_DPM_FORCED_LEVEL_LOW;
321 } else if (strncmp("high", buf, strlen("high")) == 0) {
322 level = AMD_DPM_FORCED_LEVEL_HIGH;
323 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
324 level = AMD_DPM_FORCED_LEVEL_AUTO;
325 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
326 level = AMD_DPM_FORCED_LEVEL_MANUAL;
327 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
328 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
329 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
330 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
331 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
332 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
333 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
334 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
335 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
336 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
341 ret = pm_runtime_get_sync(ddev->dev);
343 pm_runtime_put_autosuspend(ddev->dev);
347 if (is_support_sw_smu(adev))
348 current_level = smu_get_performance_level(&adev->smu);
349 else if (adev->powerplay.pp_funcs->get_performance_level)
350 current_level = amdgpu_dpm_get_performance_level(adev);
352 if (current_level == level) {
353 pm_runtime_mark_last_busy(ddev->dev);
354 pm_runtime_put_autosuspend(ddev->dev);
358 if (adev->asic_type == CHIP_RAVEN) {
359 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
360 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
361 amdgpu_gfx_off_ctrl(adev, false);
362 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
363 amdgpu_gfx_off_ctrl(adev, true);
367 /* profile_exit setting is valid only when current mode is in profile mode */
368 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
369 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
370 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
371 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
372 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
373 pr_err("Currently not in any profile mode!\n");
374 pm_runtime_mark_last_busy(ddev->dev);
375 pm_runtime_put_autosuspend(ddev->dev);
379 if (is_support_sw_smu(adev)) {
380 ret = smu_force_performance_level(&adev->smu, level);
382 pm_runtime_mark_last_busy(ddev->dev);
383 pm_runtime_put_autosuspend(ddev->dev);
386 } else if (adev->powerplay.pp_funcs->force_performance_level) {
387 mutex_lock(&adev->pm.mutex);
388 if (adev->pm.dpm.thermal_active) {
389 mutex_unlock(&adev->pm.mutex);
390 pm_runtime_mark_last_busy(ddev->dev);
391 pm_runtime_put_autosuspend(ddev->dev);
394 ret = amdgpu_dpm_force_performance_level(adev, level);
396 mutex_unlock(&adev->pm.mutex);
397 pm_runtime_mark_last_busy(ddev->dev);
398 pm_runtime_put_autosuspend(ddev->dev);
401 adev->pm.dpm.forced_level = level;
403 mutex_unlock(&adev->pm.mutex);
405 pm_runtime_mark_last_busy(ddev->dev);
406 pm_runtime_put_autosuspend(ddev->dev);
411 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
412 struct device_attribute *attr,
415 struct drm_device *ddev = dev_get_drvdata(dev);
416 struct amdgpu_device *adev = drm_to_adev(ddev);
417 struct pp_states_info data;
420 if (amdgpu_in_reset(adev))
423 ret = pm_runtime_get_sync(ddev->dev);
425 pm_runtime_put_autosuspend(ddev->dev);
429 if (is_support_sw_smu(adev)) {
430 ret = smu_get_power_num_states(&adev->smu, &data);
433 } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
434 amdgpu_dpm_get_pp_num_states(adev, &data);
436 memset(&data, 0, sizeof(data));
439 pm_runtime_mark_last_busy(ddev->dev);
440 pm_runtime_put_autosuspend(ddev->dev);
442 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
443 for (i = 0; i < data.nums; i++)
444 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
445 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
446 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
447 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
448 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
453 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
454 struct device_attribute *attr,
457 struct drm_device *ddev = dev_get_drvdata(dev);
458 struct amdgpu_device *adev = drm_to_adev(ddev);
459 struct pp_states_info data;
460 struct smu_context *smu = &adev->smu;
461 enum amd_pm_state_type pm = 0;
464 if (amdgpu_in_reset(adev))
467 ret = pm_runtime_get_sync(ddev->dev);
469 pm_runtime_put_autosuspend(ddev->dev);
473 if (is_support_sw_smu(adev)) {
474 pm = smu_get_current_power_state(smu);
475 ret = smu_get_power_num_states(smu, &data);
478 } else if (adev->powerplay.pp_funcs->get_current_power_state
479 && adev->powerplay.pp_funcs->get_pp_num_states) {
480 pm = amdgpu_dpm_get_current_power_state(adev);
481 amdgpu_dpm_get_pp_num_states(adev, &data);
484 pm_runtime_mark_last_busy(ddev->dev);
485 pm_runtime_put_autosuspend(ddev->dev);
487 for (i = 0; i < data.nums; i++) {
488 if (pm == data.states[i])
495 return snprintf(buf, PAGE_SIZE, "%d\n", i);
498 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
499 struct device_attribute *attr,
502 struct drm_device *ddev = dev_get_drvdata(dev);
503 struct amdgpu_device *adev = drm_to_adev(ddev);
505 if (amdgpu_in_reset(adev))
508 if (adev->pp_force_state_enabled)
509 return amdgpu_get_pp_cur_state(dev, attr, buf);
511 return snprintf(buf, PAGE_SIZE, "\n");
514 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
515 struct device_attribute *attr,
519 struct drm_device *ddev = dev_get_drvdata(dev);
520 struct amdgpu_device *adev = drm_to_adev(ddev);
521 enum amd_pm_state_type state = 0;
525 if (amdgpu_in_reset(adev))
528 if (strlen(buf) == 1)
529 adev->pp_force_state_enabled = false;
530 else if (is_support_sw_smu(adev))
531 adev->pp_force_state_enabled = false;
532 else if (adev->powerplay.pp_funcs->dispatch_tasks &&
533 adev->powerplay.pp_funcs->get_pp_num_states) {
534 struct pp_states_info data;
536 ret = kstrtoul(buf, 0, &idx);
537 if (ret || idx >= ARRAY_SIZE(data.states))
540 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
542 amdgpu_dpm_get_pp_num_states(adev, &data);
543 state = data.states[idx];
545 ret = pm_runtime_get_sync(ddev->dev);
547 pm_runtime_put_autosuspend(ddev->dev);
551 /* only set user selected power states */
552 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
553 state != POWER_STATE_TYPE_DEFAULT) {
554 amdgpu_dpm_dispatch_task(adev,
555 AMD_PP_TASK_ENABLE_USER_STATE, &state);
556 adev->pp_force_state_enabled = true;
558 pm_runtime_mark_last_busy(ddev->dev);
559 pm_runtime_put_autosuspend(ddev->dev);
568 * The amdgpu driver provides a sysfs API for uploading new powerplay
569 * tables. The file pp_table is used for this. Reading the file
570 * will dump the current powerplay table. Writing to the file
571 * will attempt to upload a new powerplay table and re-initialize
572 * powerplay using that new table.
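 *
 * For example, to save the current table and write it back later (paths
 * are illustrative; the table is binary data, not text):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *    cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table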
576 static ssize_t amdgpu_get_pp_table(struct device *dev,
577 struct device_attribute *attr,
580 struct drm_device *ddev = dev_get_drvdata(dev);
581 struct amdgpu_device *adev = drm_to_adev(ddev);
585 if (amdgpu_in_reset(adev))
588 ret = pm_runtime_get_sync(ddev->dev);
590 pm_runtime_put_autosuspend(ddev->dev);
594 if (is_support_sw_smu(adev)) {
595 size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
596 pm_runtime_mark_last_busy(ddev->dev);
597 pm_runtime_put_autosuspend(ddev->dev);
600 } else if (adev->powerplay.pp_funcs->get_pp_table) {
601 size = amdgpu_dpm_get_pp_table(adev, &table);
602 pm_runtime_mark_last_busy(ddev->dev);
603 pm_runtime_put_autosuspend(ddev->dev);
607 pm_runtime_mark_last_busy(ddev->dev);
608 pm_runtime_put_autosuspend(ddev->dev);
612 if (size >= PAGE_SIZE)
613 size = PAGE_SIZE - 1;
615 memcpy(buf, table, size);
620 static ssize_t amdgpu_set_pp_table(struct device *dev,
621 struct device_attribute *attr,
625 struct drm_device *ddev = dev_get_drvdata(dev);
626 struct amdgpu_device *adev = drm_to_adev(ddev);
629 if (amdgpu_in_reset(adev))
632 ret = pm_runtime_get_sync(ddev->dev);
634 pm_runtime_put_autosuspend(ddev->dev);
638 if (is_support_sw_smu(adev)) {
639 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
641 pm_runtime_mark_last_busy(ddev->dev);
642 pm_runtime_put_autosuspend(ddev->dev);
645 } else if (adev->powerplay.pp_funcs->set_pp_table)
646 amdgpu_dpm_set_pp_table(adev, buf, count);
648 pm_runtime_mark_last_busy(ddev->dev);
649 pm_runtime_put_autosuspend(ddev->dev);
655 * DOC: pp_od_clk_voltage
657 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
658 * in each power level within a power state. The file pp_od_clk_voltage is used for this.
661 * Note that the actual memory controller clock rate is exposed, not
662 * the effective memory clock of the DRAMs. To translate it, use the following formulas:
665 * Clock conversion (MHz):
667 * HBM: effective_memory_clock = memory_controller_clock * 1
669 * G5: effective_memory_clock = memory_controller_clock * 1
671 * G6: effective_memory_clock = memory_controller_clock * 2
673 * DRAM data rate (MT/s):
675 * HBM: effective_memory_clock * 2 = data_rate
677 * G5: effective_memory_clock * 4 = data_rate
679 * G6: effective_memory_clock * 8 = data_rate
683 * data_rate * vram_bit_width / 8 = memory_bandwidth
689 * memory_controller_clock = 1750 MHz
691 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
693 * data rate = 1750 * 4 = 7000 MT/s
695 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
699 * memory_controller_clock = 875 MHz
701 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
703 * data rate = 1750 * 8 = 14000 MT/s
705 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
707 * < For Vega10 and previous ASICs >
709 * Reading the file will display:
711 * - a list of engine clock levels and voltages labeled OD_SCLK
713 * - a list of memory clock levels and voltages labeled OD_MCLK
715 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
717 * To manually adjust these settings, first select manual using
718 * power_dpm_force_performance_level. Enter a new value for each
719 * level by writing a string that contains "s/m level clock voltage" to
720 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
721 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
722 * 810 mV. When you have edited all of the states as needed, write
723 * "c" (commit) to the file to commit your changes. If you want to reset to the
724 * default power levels, write "r" (reset) to the file to reset them.
727 * < For Vega20 and newer ASICs >
729 * Reading the file will display:
731 * - minimum and maximum engine clock labeled OD_SCLK
733 * - maximum memory clock labeled OD_MCLK
735 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
736 * They can be used to calibrate the sclk voltage curve.
738 * - a list of valid ranges for sclk, mclk, and voltage curve points
741 * To manually adjust these settings:
743 * - First select manual using power_dpm_force_performance_level
745 * - For clock frequency setting, enter a new value by writing a
746 * string that contains "s/m index clock" to the file. The index
747 * should be 0 to set the minimum clock and 1 to set the maximum
748 * clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
749 * "m 1 800" will update the maximum mclk to be 800 MHz.
751 * - For the sclk voltage curve, enter the new values by writing a
752 * string that contains "vc point clock voltage" to the file. The
753 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
754 * update point 0 with the clock set to 300 MHz and the voltage to
755 * 600 mV. "vc 2 1000 1000" will update point 2 with the clock set
756 * to 1000 MHz and the voltage to 1000 mV.
758 * - When you have edited all of the states as needed, write "c" (commit)
759 * to the file to commit your changes
761 * - If you want to reset to the default power levels, write "r" (reset)
762 * to the file to reset them
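 *
 * As a sketch for a Vega20-class ASIC (the values and path are purely
 * illustrative; stay within the limits reported in OD_RANGE):
 *
 * .. code-block:: bash
 *
 *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "s 1 2000" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *    echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage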
766 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
767 struct device_attribute *attr,
771 struct drm_device *ddev = dev_get_drvdata(dev);
772 struct amdgpu_device *adev = drm_to_adev(ddev);
774 uint32_t parameter_size = 0;
779 const char delimiter[3] = {' ', '\n', '\0'};
782 if (amdgpu_in_reset(adev))
789 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
790 else if (*buf == 'm')
791 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
793 type = PP_OD_RESTORE_DEFAULT_TABLE;
794 else if (*buf == 'c')
795 type = PP_OD_COMMIT_DPM_TABLE;
796 else if (!strncmp(buf, "vc", 2))
797 type = PP_OD_EDIT_VDDC_CURVE;
801 memcpy(buf_cpy, buf, count+1);
805 if (type == PP_OD_EDIT_VDDC_CURVE)
807 while (isspace(*++tmp_str));
810 sub_str = strsep(&tmp_str, delimiter);
811 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
816 while (isspace(*tmp_str))
820 ret = pm_runtime_get_sync(ddev->dev);
822 pm_runtime_put_autosuspend(ddev->dev);
826 if (is_support_sw_smu(adev)) {
827 ret = smu_od_edit_dpm_table(&adev->smu, type,
828 parameter, parameter_size);
831 pm_runtime_mark_last_busy(ddev->dev);
832 pm_runtime_put_autosuspend(ddev->dev);
837 if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
838 ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
842 pm_runtime_mark_last_busy(ddev->dev);
843 pm_runtime_put_autosuspend(ddev->dev);
848 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
849 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
850 parameter, parameter_size);
852 pm_runtime_mark_last_busy(ddev->dev);
853 pm_runtime_put_autosuspend(ddev->dev);
858 if (type == PP_OD_COMMIT_DPM_TABLE) {
859 if (adev->powerplay.pp_funcs->dispatch_tasks) {
860 amdgpu_dpm_dispatch_task(adev,
861 AMD_PP_TASK_READJUST_POWER_STATE,
863 pm_runtime_mark_last_busy(ddev->dev);
864 pm_runtime_put_autosuspend(ddev->dev);
867 pm_runtime_mark_last_busy(ddev->dev);
868 pm_runtime_put_autosuspend(ddev->dev);
873 pm_runtime_mark_last_busy(ddev->dev);
874 pm_runtime_put_autosuspend(ddev->dev);
879 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
880 struct device_attribute *attr,
883 struct drm_device *ddev = dev_get_drvdata(dev);
884 struct amdgpu_device *adev = drm_to_adev(ddev);
888 if (amdgpu_in_reset(adev))
891 ret = pm_runtime_get_sync(ddev->dev);
893 pm_runtime_put_autosuspend(ddev->dev);
897 if (is_support_sw_smu(adev)) {
898 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
899 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
900 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
901 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
902 } else if (adev->powerplay.pp_funcs->print_clock_levels) {
903 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
904 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
905 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
906 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
908 size = snprintf(buf, PAGE_SIZE, "\n");
910 pm_runtime_mark_last_busy(ddev->dev);
911 pm_runtime_put_autosuspend(ddev->dev);
919 * The amdgpu driver provides a sysfs API for adjusting which powerplay
920 * features are enabled. The file pp_features is used for this. This
921 * interface is only available for Vega10 and later dGPUs.
923 * Reading back the file will show you the following:
924 * - the current ppfeature mask
925 * - a list of all supported powerplay features with their names,
926 * bitmasks and enablement status ('Y'/'N' means enabled/disabled).
928 * To manually enable or disable a specific feature, set or clear
929 * the corresponding bit in the original ppfeature mask and write the
930 * new mask back, as in the sketch below.
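 *
 * A minimal sketch (path and mask value are illustrative; derive the new
 * mask from the one read back):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_features
 *    echo 0x0000000019f0e3cf > /sys/class/drm/card0/device/pp_features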
932 static ssize_t amdgpu_set_pp_features(struct device *dev,
933 struct device_attribute *attr,
937 struct drm_device *ddev = dev_get_drvdata(dev);
938 struct amdgpu_device *adev = drm_to_adev(ddev);
939 uint64_t featuremask;
942 if (amdgpu_in_reset(adev))
945 ret = kstrtou64(buf, 0, &featuremask);
949 ret = pm_runtime_get_sync(ddev->dev);
951 pm_runtime_put_autosuspend(ddev->dev);
955 if (is_support_sw_smu(adev)) {
956 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
958 pm_runtime_mark_last_busy(ddev->dev);
959 pm_runtime_put_autosuspend(ddev->dev);
962 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
963 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
965 pm_runtime_mark_last_busy(ddev->dev);
966 pm_runtime_put_autosuspend(ddev->dev);
970 pm_runtime_mark_last_busy(ddev->dev);
971 pm_runtime_put_autosuspend(ddev->dev);
976 static ssize_t amdgpu_get_pp_features(struct device *dev,
977 struct device_attribute *attr,
980 struct drm_device *ddev = dev_get_drvdata(dev);
981 struct amdgpu_device *adev = drm_to_adev(ddev);
985 if (amdgpu_in_reset(adev))
988 ret = pm_runtime_get_sync(ddev->dev);
990 pm_runtime_put_autosuspend(ddev->dev);
994 if (is_support_sw_smu(adev))
995 size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
996 else if (adev->powerplay.pp_funcs->get_ppfeature_status)
997 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
999 size = snprintf(buf, PAGE_SIZE, "\n");
1001 pm_runtime_mark_last_busy(ddev->dev);
1002 pm_runtime_put_autosuspend(ddev->dev);
1008 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
1010 * The amdgpu driver provides a sysfs API for adjusting what power levels
1011 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
1012 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
1015 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
1016 * Vega10 and later ASICs.
1017 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
1019 * Reading back the files will show you the available power levels within
1020 * the power state and the clock information for those levels.
1022 * To manually adjust these states, first select manual using
1023 * power_dpm_force_performance_level.
1024 * Secondly, enable the desired power levels by writing a string that
1025 * contains the level indices to the pp_dpm_sclk/mclk/pcie file, e.g.:
1028 * .. code-block:: bash
1030 * echo "4 5 6" > pp_dpm_sclk
1032 * will enable sclk levels 4, 5, and 6.
1034 * NOTE: changing the dcefclk max dpm level is currently not supported.
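 *
 * Reading a file back lists the levels, one per line, with the currently
 * active level typically marked by an asterisk (path is illustrative):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_dpm_sclk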
1037 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1038 struct device_attribute *attr,
1041 struct drm_device *ddev = dev_get_drvdata(dev);
1042 struct amdgpu_device *adev = drm_to_adev(ddev);
1046 if (amdgpu_in_reset(adev))
1049 ret = pm_runtime_get_sync(ddev->dev);
1051 pm_runtime_put_autosuspend(ddev->dev);
1055 if (is_support_sw_smu(adev))
1056 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
1057 else if (adev->powerplay.pp_funcs->print_clock_levels)
1058 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
1060 size = snprintf(buf, PAGE_SIZE, "\n");
1062 pm_runtime_mark_last_busy(ddev->dev);
1063 pm_runtime_put_autosuspend(ddev->dev);
1069 * Worst case: 32 bits individually specified, in octal at 12 characters
1070 * per value (+1 for the separator).
1072 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1074 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1078 char *sub_str = NULL;
1080 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1081 const char delimiter[3] = {' ', '\n', '\0'};
1086 bytes = min(count, sizeof(buf_cpy) - 1);
1087 memcpy(buf_cpy, buf, bytes);
1088 buf_cpy[bytes] = '\0';
1091 sub_str = strsep(&tmp, delimiter);
1092 if (strlen(sub_str)) {
1093 ret = kstrtol(sub_str, 0, &level);
1096 *mask |= 1 << level;
1104 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1105 struct device_attribute *attr,
1109 struct drm_device *ddev = dev_get_drvdata(dev);
1110 struct amdgpu_device *adev = drm_to_adev(ddev);
1114 if (amdgpu_in_reset(adev))
1117 ret = amdgpu_read_mask(buf, count, &mask);
1121 ret = pm_runtime_get_sync(ddev->dev);
1123 pm_runtime_put_autosuspend(ddev->dev);
1127 if (is_support_sw_smu(adev))
1128 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
1129 else if (adev->powerplay.pp_funcs->force_clock_level)
1130 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
1132 pm_runtime_mark_last_busy(ddev->dev);
1133 pm_runtime_put_autosuspend(ddev->dev);
1141 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1142 struct device_attribute *attr,
1145 struct drm_device *ddev = dev_get_drvdata(dev);
1146 struct amdgpu_device *adev = drm_to_adev(ddev);
1150 if (amdgpu_in_reset(adev))
1153 ret = pm_runtime_get_sync(ddev->dev);
1155 pm_runtime_put_autosuspend(ddev->dev);
1159 if (is_support_sw_smu(adev))
1160 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
1161 else if (adev->powerplay.pp_funcs->print_clock_levels)
1162 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
1164 size = snprintf(buf, PAGE_SIZE, "\n");
1166 pm_runtime_mark_last_busy(ddev->dev);
1167 pm_runtime_put_autosuspend(ddev->dev);
1172 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1173 struct device_attribute *attr,
1177 struct drm_device *ddev = dev_get_drvdata(dev);
1178 struct amdgpu_device *adev = drm_to_adev(ddev);
1182 if (amdgpu_in_reset(adev))
1185 ret = amdgpu_read_mask(buf, count, &mask);
1189 ret = pm_runtime_get_sync(ddev->dev);
1191 pm_runtime_put_autosuspend(ddev->dev);
1195 if (is_support_sw_smu(adev))
1196 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
1197 else if (adev->powerplay.pp_funcs->force_clock_level)
1198 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
1200 pm_runtime_mark_last_busy(ddev->dev);
1201 pm_runtime_put_autosuspend(ddev->dev);
1209 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1210 struct device_attribute *attr,
1213 struct drm_device *ddev = dev_get_drvdata(dev);
1214 struct amdgpu_device *adev = drm_to_adev(ddev);
1218 if (amdgpu_in_reset(adev))
1221 ret = pm_runtime_get_sync(ddev->dev);
1223 pm_runtime_put_autosuspend(ddev->dev);
1227 if (is_support_sw_smu(adev))
1228 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
1229 else if (adev->powerplay.pp_funcs->print_clock_levels)
1230 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
1232 size = snprintf(buf, PAGE_SIZE, "\n");
1234 pm_runtime_mark_last_busy(ddev->dev);
1235 pm_runtime_put_autosuspend(ddev->dev);
1240 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1241 struct device_attribute *attr,
1245 struct drm_device *ddev = dev_get_drvdata(dev);
1246 struct amdgpu_device *adev = drm_to_adev(ddev);
1250 if (amdgpu_in_reset(adev))
1253 ret = amdgpu_read_mask(buf, count, &mask);
1257 ret = pm_runtime_get_sync(ddev->dev);
1259 pm_runtime_put_autosuspend(ddev->dev);
1263 if (is_support_sw_smu(adev))
1264 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
1265 else if (adev->powerplay.pp_funcs->force_clock_level)
1266 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
1270 pm_runtime_mark_last_busy(ddev->dev);
1271 pm_runtime_put_autosuspend(ddev->dev);
1279 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1280 struct device_attribute *attr,
1283 struct drm_device *ddev = dev_get_drvdata(dev);
1284 struct amdgpu_device *adev = drm_to_adev(ddev);
1288 if (amdgpu_in_reset(adev))
1291 ret = pm_runtime_get_sync(ddev->dev);
1293 pm_runtime_put_autosuspend(ddev->dev);
1297 if (is_support_sw_smu(adev))
1298 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
1299 else if (adev->powerplay.pp_funcs->print_clock_levels)
1300 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
1302 size = snprintf(buf, PAGE_SIZE, "\n");
1304 pm_runtime_mark_last_busy(ddev->dev);
1305 pm_runtime_put_autosuspend(ddev->dev);
1310 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1311 struct device_attribute *attr,
1315 struct drm_device *ddev = dev_get_drvdata(dev);
1316 struct amdgpu_device *adev = drm_to_adev(ddev);
1320 if (amdgpu_in_reset(adev))
1323 ret = amdgpu_read_mask(buf, count, &mask);
1327 ret = pm_runtime_get_sync(ddev->dev);
1329 pm_runtime_put_autosuspend(ddev->dev);
1333 if (is_support_sw_smu(adev))
1334 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
1335 else if (adev->powerplay.pp_funcs->force_clock_level)
1336 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
1340 pm_runtime_mark_last_busy(ddev->dev);
1341 pm_runtime_put_autosuspend(ddev->dev);
1349 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1350 struct device_attribute *attr,
1353 struct drm_device *ddev = dev_get_drvdata(dev);
1354 struct amdgpu_device *adev = drm_to_adev(ddev);
1358 if (amdgpu_in_reset(adev))
1361 ret = pm_runtime_get_sync(ddev->dev);
1363 pm_runtime_put_autosuspend(ddev->dev);
1367 if (is_support_sw_smu(adev))
1368 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
1369 else if (adev->powerplay.pp_funcs->print_clock_levels)
1370 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
1372 size = snprintf(buf, PAGE_SIZE, "\n");
1374 pm_runtime_mark_last_busy(ddev->dev);
1375 pm_runtime_put_autosuspend(ddev->dev);
1380 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1381 struct device_attribute *attr,
1385 struct drm_device *ddev = dev_get_drvdata(dev);
1386 struct amdgpu_device *adev = drm_to_adev(ddev);
1390 if (amdgpu_in_reset(adev))
1393 ret = amdgpu_read_mask(buf, count, &mask);
1397 ret = pm_runtime_get_sync(ddev->dev);
1399 pm_runtime_put_autosuspend(ddev->dev);
1403 if (is_support_sw_smu(adev))
1404 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
1405 else if (adev->powerplay.pp_funcs->force_clock_level)
1406 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
1410 pm_runtime_mark_last_busy(ddev->dev);
1411 pm_runtime_put_autosuspend(ddev->dev);
1419 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1420 struct device_attribute *attr,
1423 struct drm_device *ddev = dev_get_drvdata(dev);
1424 struct amdgpu_device *adev = drm_to_adev(ddev);
1428 if (amdgpu_in_reset(adev))
1431 ret = pm_runtime_get_sync(ddev->dev);
1433 pm_runtime_put_autosuspend(ddev->dev);
1437 if (is_support_sw_smu(adev))
1438 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
1439 else if (adev->powerplay.pp_funcs->print_clock_levels)
1440 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
1442 size = snprintf(buf, PAGE_SIZE, "\n");
1444 pm_runtime_mark_last_busy(ddev->dev);
1445 pm_runtime_put_autosuspend(ddev->dev);
1450 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1451 struct device_attribute *attr,
1455 struct drm_device *ddev = dev_get_drvdata(dev);
1456 struct amdgpu_device *adev = drm_to_adev(ddev);
1460 if (amdgpu_in_reset(adev))
1463 ret = amdgpu_read_mask(buf, count, &mask);
1467 ret = pm_runtime_get_sync(ddev->dev);
1469 pm_runtime_put_autosuspend(ddev->dev);
1473 if (is_support_sw_smu(adev))
1474 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
1475 else if (adev->powerplay.pp_funcs->force_clock_level)
1476 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
1480 pm_runtime_mark_last_busy(ddev->dev);
1481 pm_runtime_put_autosuspend(ddev->dev);
1489 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1490 struct device_attribute *attr,
1493 struct drm_device *ddev = dev_get_drvdata(dev);
1494 struct amdgpu_device *adev = drm_to_adev(ddev);
1498 if (amdgpu_in_reset(adev))
1501 ret = pm_runtime_get_sync(ddev->dev);
1503 pm_runtime_put_autosuspend(ddev->dev);
1507 if (is_support_sw_smu(adev))
1509 else if (adev->powerplay.pp_funcs->get_sclk_od)
1510 value = amdgpu_dpm_get_sclk_od(adev);
1512 pm_runtime_mark_last_busy(ddev->dev);
1513 pm_runtime_put_autosuspend(ddev->dev);
1515 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1518 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1519 struct device_attribute *attr,
1523 struct drm_device *ddev = dev_get_drvdata(dev);
1524 struct amdgpu_device *adev = drm_to_adev(ddev);
1528 if (amdgpu_in_reset(adev))
1531 ret = kstrtol(buf, 0, &value);
1536 ret = pm_runtime_get_sync(ddev->dev);
1538 pm_runtime_put_autosuspend(ddev->dev);
1542 if (is_support_sw_smu(adev)) {
1545 if (adev->powerplay.pp_funcs->set_sclk_od)
1546 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1548 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1549 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1551 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1552 amdgpu_pm_compute_clocks(adev);
1556 pm_runtime_mark_last_busy(ddev->dev);
1557 pm_runtime_put_autosuspend(ddev->dev);
1562 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1563 struct device_attribute *attr,
1566 struct drm_device *ddev = dev_get_drvdata(dev);
1567 struct amdgpu_device *adev = drm_to_adev(ddev);
1571 if (amdgpu_in_reset(adev))
1574 ret = pm_runtime_get_sync(ddev->dev);
1576 pm_runtime_put_autosuspend(ddev->dev);
1580 if (is_support_sw_smu(adev))
1582 else if (adev->powerplay.pp_funcs->get_mclk_od)
1583 value = amdgpu_dpm_get_mclk_od(adev);
1585 pm_runtime_mark_last_busy(ddev->dev);
1586 pm_runtime_put_autosuspend(ddev->dev);
1588 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1591 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1592 struct device_attribute *attr,
1596 struct drm_device *ddev = dev_get_drvdata(dev);
1597 struct amdgpu_device *adev = drm_to_adev(ddev);
1601 if (amdgpu_in_reset(adev))
1604 ret = kstrtol(buf, 0, &value);
1609 ret = pm_runtime_get_sync(ddev->dev);
1611 pm_runtime_put_autosuspend(ddev->dev);
1615 if (is_support_sw_smu(adev)) {
1618 if (adev->powerplay.pp_funcs->set_mclk_od)
1619 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1621 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1622 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1624 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1625 amdgpu_pm_compute_clocks(adev);
1629 pm_runtime_mark_last_busy(ddev->dev);
1630 pm_runtime_put_autosuspend(ddev->dev);
1636 * DOC: pp_power_profile_mode
1638 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1639 * related to switching between power levels in a power state. The file
1640 * pp_power_profile_mode is used for this.
1642 * Reading this file outputs a list of all of the predefined power profiles
1643 * and the relevant heuristics settings for that profile.
1645 * To select a profile or create a custom profile, first select manual using
1646 * power_dpm_force_performance_level. Writing the number of a predefined
1647 * profile to pp_power_profile_mode will enable those heuristics. To
1648 * create a custom set of heuristics, write a string of numbers to the file
1649 * starting with the number of the custom profile along with a setting
1650 * for each heuristic parameter. Due to differences across ASIC families,
1651 * the heuristic parameters vary from family to family.
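 *
 * For example, to select a predefined profile by number (the number below
 * is illustrative; read the file first to see the list for your ASIC):
 *
 * .. code-block:: bash
 *
 *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    cat /sys/class/drm/card0/device/pp_power_profile_mode
 *    echo 2 > /sys/class/drm/card0/device/pp_power_profile_mode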
1655 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1656 struct device_attribute *attr,
1659 struct drm_device *ddev = dev_get_drvdata(dev);
1660 struct amdgpu_device *adev = drm_to_adev(ddev);
1664 if (amdgpu_in_reset(adev))
1667 ret = pm_runtime_get_sync(ddev->dev);
1669 pm_runtime_put_autosuspend(ddev->dev);
1673 if (is_support_sw_smu(adev))
1674 size = smu_get_power_profile_mode(&adev->smu, buf);
1675 else if (adev->powerplay.pp_funcs->get_power_profile_mode)
1676 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1678 size = snprintf(buf, PAGE_SIZE, "\n");
1680 pm_runtime_mark_last_busy(ddev->dev);
1681 pm_runtime_put_autosuspend(ddev->dev);
1687 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1688 struct device_attribute *attr,
1693 struct drm_device *ddev = dev_get_drvdata(dev);
1694 struct amdgpu_device *adev = drm_to_adev(ddev);
1695 uint32_t parameter_size = 0;
1697 char *sub_str, buf_cpy[128];
1701 long int profile_mode = 0;
1702 const char delimiter[3] = {' ', '\n', '\0'};
1704 if (amdgpu_in_reset(adev))
1709 ret = kstrtol(tmp, 0, &profile_mode);
1713 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1714 if (count < 2 || count > 127)
1716 while (isspace(*++buf))
1718 memcpy(buf_cpy, buf, count-i);
1720 while (tmp_str[0]) {
1721 sub_str = strsep(&tmp_str, delimiter);
1722 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1726 while (isspace(*tmp_str))
1730 parameter[parameter_size] = profile_mode;
1732 ret = pm_runtime_get_sync(ddev->dev);
1734 pm_runtime_put_autosuspend(ddev->dev);
1738 if (is_support_sw_smu(adev))
1739 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
1740 else if (adev->powerplay.pp_funcs->set_power_profile_mode)
1741 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1743 pm_runtime_mark_last_busy(ddev->dev);
1744 pm_runtime_put_autosuspend(ddev->dev);
1753 * DOC: gpu_busy_percent
1755 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1756 * is as a percentage. The file gpu_busy_percent is used for this.
1757 * The SMU firmware computes a percentage of load based on the
1758 * aggregate activity level in the IP cores.
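 *
 * Reading returns a plain integer percentage (path is illustrative):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/gpu_busy_percent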
1760 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1761 struct device_attribute *attr,
1764 struct drm_device *ddev = dev_get_drvdata(dev);
1765 struct amdgpu_device *adev = drm_to_adev(ddev);
1766 int r, value, size = sizeof(value);
1768 if (amdgpu_in_reset(adev))
1771 r = pm_runtime_get_sync(ddev->dev);
1773 pm_runtime_put_autosuspend(ddev->dev);
1777 /* read the IP busy sensor */
1778 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1779 (void *)&value, &size);
1781 pm_runtime_mark_last_busy(ddev->dev);
1782 pm_runtime_put_autosuspend(ddev->dev);
1787 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1791 * DOC: mem_busy_percent
1793 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1794 * is as a percentage. The file mem_busy_percent is used for this.
1795 * The SMU firmware computes a percentage of load based on the
1796 * aggregate activity level in the IP cores.
1798 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1799 struct device_attribute *attr,
1802 struct drm_device *ddev = dev_get_drvdata(dev);
1803 struct amdgpu_device *adev = drm_to_adev(ddev);
1804 int r, value, size = sizeof(value);
1806 if (amdgpu_in_reset(adev))
1809 r = pm_runtime_get_sync(ddev->dev);
1811 pm_runtime_put_autosuspend(ddev->dev);
1815 /* read the memory (VRAM) busy sensor */
1816 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1817 (void *)&value, &size);
1819 pm_runtime_mark_last_busy(ddev->dev);
1820 pm_runtime_put_autosuspend(ddev->dev);
1825 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1831 * The amdgpu driver provides a sysfs API for estimating how much data
1832 * has been received and sent by the GPU in the last second through PCIe.
1833 * The file pcie_bw is used for this.
1834 * The Perf counters count the number of received and sent messages and return
1835 * those values, as well as the maximum payload size of a PCIe packet (mps).
1836 * Note that it is not possible to easily and quickly obtain the size of each
1837 * packet transmitted, so we output the max payload size (mps) to allow for
1838 * quick estimation of the PCIe bandwidth usage.
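 *
 * Reading returns "count0 count1 mps"; a rough upper bound on the bytes
 * moved in the last second is (count0 + count1) * mps, as sketched below
 * (path is illustrative):
 *
 * .. code-block:: bash
 *
 *    read count0 count1 mps < /sys/class/drm/card0/device/pcie_bw
 *    echo $(( (count0 + count1) * mps ))   # approx. bytes in the last second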
1840 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1841 struct device_attribute *attr,
1844 struct drm_device *ddev = dev_get_drvdata(dev);
1845 struct amdgpu_device *adev = drm_to_adev(ddev);
1846 uint64_t count0 = 0, count1 = 0;
1849 if (amdgpu_in_reset(adev))
1852 if (adev->flags & AMD_IS_APU)
1855 if (!adev->asic_funcs->get_pcie_usage)
1858 ret = pm_runtime_get_sync(ddev->dev);
1860 pm_runtime_put_autosuspend(ddev->dev);
1864 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1866 pm_runtime_mark_last_busy(ddev->dev);
1867 pm_runtime_put_autosuspend(ddev->dev);
1869 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1870 count0, count1, pcie_get_mps(adev->pdev));
1876 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1877 * The file unique_id is used for this.
1878 * This will provide a unique ID that will persist from machine to machine.
1880 * NOTE: This will only work for GFX9 and newer. This file will be absent
1881 * on unsupported ASICs (GFX8 and older).
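 *
 * For example (path is illustrative; the ID is printed as a 64-bit hex value):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/unique_id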
1883 static ssize_t amdgpu_get_unique_id(struct device *dev,
1884 struct device_attribute *attr,
1887 struct drm_device *ddev = dev_get_drvdata(dev);
1888 struct amdgpu_device *adev = drm_to_adev(ddev);
1890 if (amdgpu_in_reset(adev))
1893 if (adev->unique_id)
1894 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
1900 * DOC: thermal_throttling_logging
1902 * Thermal throttling pulls down the clock frequency and thus the performance.
1903 * It's a useful mechanism to protect the chip from overheating. Since it
1904 * impacts performance, the user controls whether it is enabled and, if so,
1905 * the log frequency.
1907 * Reading back the file shows you the status (enabled or disabled) and
1908 * the interval (in seconds) between each thermal logging.
1910 * Writing an integer to the file sets a new logging interval, in seconds.
1911 * The value should be between 1 and 3600. If the value is less than 1,
1912 * thermal logging is disabled. Values greater than 3600 are ignored.
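 *
 * For example, to log at most once per minute, or to disable the logging
 * entirely (path is illustrative):
 *
 * .. code-block:: bash
 *
 *    echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *    echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging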
1914 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1915 struct device_attribute *attr,
1918 struct drm_device *ddev = dev_get_drvdata(dev);
1919 struct amdgpu_device *adev = drm_to_adev(ddev);
1921 return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
1922 adev_to_drm(adev)->unique,
1923 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1924 adev->throttling_logging_rs.interval / HZ + 1);
1927 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1928 struct device_attribute *attr,
1932 struct drm_device *ddev = dev_get_drvdata(dev);
1933 struct amdgpu_device *adev = drm_to_adev(ddev);
1934 long throttling_logging_interval;
1935 unsigned long flags;
1938 ret = kstrtol(buf, 0, &throttling_logging_interval);
1942 if (throttling_logging_interval > 3600)
1945 if (throttling_logging_interval > 0) {
1946 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1948 * Reset the ratelimit timer internals.
1949 * This can effectively restart the timer.
1951 adev->throttling_logging_rs.interval =
1952 (throttling_logging_interval - 1) * HZ;
1953 adev->throttling_logging_rs.begin = 0;
1954 adev->throttling_logging_rs.printed = 0;
1955 adev->throttling_logging_rs.missed = 0;
1956 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1958 atomic_set(&adev->throttling_logging_enabled, 1);
1960 atomic_set(&adev->throttling_logging_enabled, 0);
1969 * The amdgpu driver provides a sysfs API for retrieving current GPU
1970 * metrics data. The file gpu_metrics is used for this. Reading the
1971 * file will dump all the current GPU metrics data.
1973 * These data include temperature, frequency, engine utilization,
1974 * power consumption, throttler status, fan speed and CPU core statistics
1975 * (available for APUs only). That is, it gives a snapshot of all sensors at once.
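 *
 * The file contains a binary metrics table, so a hex dump is a convenient
 * way to inspect it (path is illustrative):
 *
 * .. code-block:: bash
 *
 *    hexdump -C /sys/class/drm/card0/device/gpu_metrics | head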
1978 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1979 struct device_attribute *attr,
1982 struct drm_device *ddev = dev_get_drvdata(dev);
1983 struct amdgpu_device *adev = drm_to_adev(ddev);
1988 if (amdgpu_in_reset(adev))
1991 ret = pm_runtime_get_sync(ddev->dev);
1993 pm_runtime_put_autosuspend(ddev->dev);
1997 if (is_support_sw_smu(adev))
1998 size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
1999 else if (adev->powerplay.pp_funcs->get_gpu_metrics)
2000 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
2005 if (size >= PAGE_SIZE)
2006 size = PAGE_SIZE - 1;
2008 memcpy(buf, gpu_metrics, size);
2011 pm_runtime_mark_last_busy(ddev->dev);
2012 pm_runtime_put_autosuspend(ddev->dev);
2017 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2018 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2019 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
2020 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
2021 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
2022 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
2023 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
2024 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2025 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2026 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2027 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2028 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
2029 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
2030 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2031 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2032 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
2033 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
2034 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
2035 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
2036 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2037 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
2038 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
2039 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
2040 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
2043 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2044 uint32_t mask, enum amdgpu_device_attr_states *states)
2046 struct device_attribute *dev_attr = &attr->dev_attr;
2047 const char *attr_name = dev_attr->attr.name;
2048 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2049 enum amd_asic_type asic_type = adev->asic_type;
2051 if (!(attr->flags & mask)) {
2052 *states = ATTR_STATE_UNSUPPORTED;
2056 #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
2058 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2059 if (asic_type < CHIP_VEGA10)
2060 *states = ATTR_STATE_UNSUPPORTED;
2061 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2062 if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
2063 *states = ATTR_STATE_UNSUPPORTED;
2064 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2065 if (asic_type < CHIP_VEGA20)
2066 *states = ATTR_STATE_UNSUPPORTED;
2067 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
2068 *states = ATTR_STATE_UNSUPPORTED;
2069 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2070 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
2071 *states = ATTR_STATE_SUPPORTED;
2072 } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2073 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
2074 *states = ATTR_STATE_UNSUPPORTED;
2075 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2076 /* PCIe Perf counters won't work on APU nodes */
2077 if (adev->flags & AMD_IS_APU)
2078 *states = ATTR_STATE_UNSUPPORTED;
2079 } else if (DEVICE_ATTR_IS(unique_id)) {
2080 if (asic_type != CHIP_VEGA10 &&
2081 asic_type != CHIP_VEGA20 &&
2082 asic_type != CHIP_ARCTURUS)
2083 *states = ATTR_STATE_UNSUPPORTED;
2084 } else if (DEVICE_ATTR_IS(pp_features)) {
2085 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
2086 *states = ATTR_STATE_UNSUPPORTED;
2087 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2088 if (asic_type < CHIP_VEGA12)
2089 *states = ATTR_STATE_UNSUPPORTED;
2092 if (asic_type == CHIP_ARCTURUS) {
2093 /* Arcturus does not support standalone mclk/socclk/fclk level setting */
2094 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2095 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2096 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2097 dev_attr->attr.mode &= ~S_IWUGO;
2098 dev_attr->store = NULL;
2102 #undef DEVICE_ATTR_IS
2108 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2109 struct amdgpu_device_attr *attr,
2110 uint32_t mask, struct list_head *attr_list)
2113 struct device_attribute *dev_attr = &attr->dev_attr;
2114 const char *name = dev_attr->attr.name;
2115 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2116 struct amdgpu_device_attr_entry *attr_entry;
2118 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2119 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2123 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2125 ret = attr_update(adev, attr, mask, &attr_states);
2127 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2132 if (attr_states == ATTR_STATE_UNSUPPORTED)
2135 ret = device_create_file(adev->dev, dev_attr);
2137 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2141 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2145 attr_entry->attr = attr;
2146 INIT_LIST_HEAD(&attr_entry->entry);
2148 list_add_tail(&attr_entry->entry, attr_list);
2153 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2155 struct device_attribute *dev_attr = &attr->dev_attr;
2157 device_remove_file(adev->dev, dev_attr);
2160 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2161 struct list_head *attr_list);
2163 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2164 struct amdgpu_device_attr *attrs,
2167 struct list_head *attr_list)
2172 for (i = 0; i < counts; i++) {
2173 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2181 amdgpu_device_attr_remove_groups(adev, attr_list);
2186 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2187 struct list_head *attr_list)
2189 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2191 if (list_empty(attr_list))
2194 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2195 amdgpu_device_attr_remove(adev, entry->attr);
2196 list_del(&entry->entry);
2201 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2202 struct device_attribute *attr,
2205 struct amdgpu_device *adev = dev_get_drvdata(dev);
2206 int channel = to_sensor_dev_attr(attr)->index;
2207 int r, temp = 0, size = sizeof(temp);
2209 if (amdgpu_in_reset(adev))
2212 if (channel >= PP_TEMP_MAX)
2215 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2217 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2222 case PP_TEMP_JUNCTION:
2223 /* get current junction temperature */
2224 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2225 (void *)&temp, &size);
2228 /* get current edge temperature */
2229 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2230 (void *)&temp, &size);
2233 /* get current memory temperature */
2234 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2235 (void *)&temp, &size);
2242 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2243 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2248 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2251 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2252 struct device_attribute *attr,
2255 struct amdgpu_device *adev = dev_get_drvdata(dev);
2256 int hyst = to_sensor_dev_attr(attr)->index;
2260 temp = adev->pm.dpm.thermal.min_temp;
2262 temp = adev->pm.dpm.thermal.max_temp;
2264 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2267 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2268 struct device_attribute *attr,
2271 struct amdgpu_device *adev = dev_get_drvdata(dev);
2272 int hyst = to_sensor_dev_attr(attr)->index;
2276 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2278 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2280 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2283 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2284 struct device_attribute *attr,
2287 struct amdgpu_device *adev = dev_get_drvdata(dev);
2288 int hyst = to_sensor_dev_attr(attr)->index;
2292 temp = adev->pm.dpm.thermal.min_mem_temp;
2294 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2296 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2299 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2300 struct device_attribute *attr,
2303 int channel = to_sensor_dev_attr(attr)->index;
2305 if (channel >= PP_TEMP_MAX)
2308 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
2311 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2312 struct device_attribute *attr,
2315 struct amdgpu_device *adev = dev_get_drvdata(dev);
2316 int channel = to_sensor_dev_attr(attr)->index;
2319 if (channel >= PP_TEMP_MAX)
2323 case PP_TEMP_JUNCTION:
2324 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2327 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2330 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2334 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2337 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2338 struct device_attribute *attr,
2341 struct amdgpu_device *adev = dev_get_drvdata(dev);
2345 if (amdgpu_in_reset(adev))
2348 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2350 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2354 if (is_support_sw_smu(adev)) {
2355 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2357 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2358 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2359 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2363 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2366 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2367 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2369 return sprintf(buf, "%i\n", pwm_mode);
2372 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2373 struct device_attribute *attr,
2377 struct amdgpu_device *adev = dev_get_drvdata(dev);
2381 if (amdgpu_in_reset(adev))
2384 err = kstrtoint(buf, 10, &value);
2388 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2390 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2394 if (is_support_sw_smu(adev)) {
2395 smu_set_fan_control_mode(&adev->smu, value);
2397 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2398 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2399 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2403 amdgpu_dpm_set_fan_control_mode(adev, value);
2406 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2407 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2412 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2413 struct device_attribute *attr,
2416 return sprintf(buf, "%i\n", 0);
2419 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2420 struct device_attribute *attr,
2423 return sprintf(buf, "%i\n", 255);
2426 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2427 struct device_attribute *attr,
2428 const char *buf, size_t count)
2430 struct amdgpu_device *adev = dev_get_drvdata(dev);
2435 if (amdgpu_in_reset(adev))
2438 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2440 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2444 if (is_support_sw_smu(adev))
2445 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2447 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2449 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2450 pr_info("manual fan speed control should be enabled first\n");
2451 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2452 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2456 err = kstrtou32(buf, 10, &value);
2458 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2459 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
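/* map the 0-255 PWM value from userspace onto the 0-100 percent
 * range used by the fan speed interface, e.g. 128 -> 50 percent
 */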
2463 value = (value * 100) / 255;
2465 if (is_support_sw_smu(adev))
2466 err = smu_set_fan_speed_percent(&adev->smu, value);
2467 else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2468 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2472 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2473 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2481 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2482 struct device_attribute *attr,
2485 struct amdgpu_device *adev = dev_get_drvdata(dev);
2489 if (amdgpu_in_reset(adev))
2492 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2494 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2498 if (is_support_sw_smu(adev))
2499 err = smu_get_fan_speed_percent(&adev->smu, &speed);
2500 else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2501 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2505 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2506 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
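/* scale the 0-100 percent fan speed back to the 0-255 PWM range
 * expected by hwmon, e.g. 50 percent -> 127
 */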
2511 speed = (speed * 255) / 100;
2513 return sprintf(buf, "%i\n", speed);
2516 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2517 struct device_attribute *attr,
2520 struct amdgpu_device *adev = dev_get_drvdata(dev);
2524 if (amdgpu_in_reset(adev))
2527 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2529 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2533 if (is_support_sw_smu(adev))
2534 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2535 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2536 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2540 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2541 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2546 return sprintf(buf, "%i\n", speed);
2549 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2550 struct device_attribute *attr,
2553 struct amdgpu_device *adev = dev_get_drvdata(dev);
2555 u32 size = sizeof(min_rpm);
2558 if (amdgpu_in_reset(adev))
2561 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2563 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2567 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2568 (void *)&min_rpm, &size);
2570 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2571 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2576 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2579 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2580 struct device_attribute *attr,
2583 struct amdgpu_device *adev = dev_get_drvdata(dev);
2585 u32 size = sizeof(max_rpm);
2588 if (amdgpu_in_reset(adev))
2591 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2593 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2597 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2598 (void *)&max_rpm, &size);
2600 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2601 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2606 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2609 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2610 struct device_attribute *attr,
2613 struct amdgpu_device *adev = dev_get_drvdata(dev);
2617 if (amdgpu_in_reset(adev))
2620 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2622 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2626 if (is_support_sw_smu(adev))
2627 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2628 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2629 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2633 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2634 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2639 return sprintf(buf, "%i\n", rpm);
2642 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2643 struct device_attribute *attr,
2644 const char *buf, size_t count)
2646 struct amdgpu_device *adev = dev_get_drvdata(dev);
2651 if (amdgpu_in_reset(adev))
2654 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2656 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2660 if (is_support_sw_smu(adev))
2661 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2663 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2665 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2666 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2667 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2671 err = kstrtou32(buf, 10, &value);
2673 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2674 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2678 if (is_support_sw_smu(adev))
2679 err = smu_set_fan_speed_rpm(&adev->smu, value);
2680 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2681 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2685 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2686 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2694 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2695 struct device_attribute *attr,
2698 struct amdgpu_device *adev = dev_get_drvdata(dev);
2702 if (amdgpu_in_reset(adev))
2705 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2707 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2711 if (is_support_sw_smu(adev)) {
2712 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2714 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2715 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2716 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2720 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2723 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2724 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2726 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2729 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2730 struct device_attribute *attr,
2734 struct amdgpu_device *adev = dev_get_drvdata(dev);
2739 if (amdgpu_in_reset(adev))
2742 err = kstrtoint(buf, 10, &value);
2747 pwm_mode = AMD_FAN_CTRL_AUTO;
2748 else if (value == 1)
2749 pwm_mode = AMD_FAN_CTRL_MANUAL;
2753 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2755 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2759 if (is_support_sw_smu(adev)) {
2760 smu_set_fan_control_mode(&adev->smu, pwm_mode);
2762 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2763 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2764 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2767 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2770 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2771 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2776 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2777 struct device_attribute *attr,
2780 struct amdgpu_device *adev = dev_get_drvdata(dev);
2782 int r, size = sizeof(vddgfx);
2784 if (amdgpu_in_reset(adev))
2787 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2789 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2793 /* get the voltage */
2794 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2795 (void *)&vddgfx, &size);
2797 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2798 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2803 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2806 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2807 struct device_attribute *attr,
2810 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2813 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2814 struct device_attribute *attr,
2817 struct amdgpu_device *adev = dev_get_drvdata(dev);
2819 int r, size = sizeof(vddnb);
2821 if (amdgpu_in_reset(adev))
2824 /* only APUs have vddnb */
2825 if (!(adev->flags & AMD_IS_APU))
2828 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2830 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2834 /* get the voltage */
2835 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2836 (void *)&vddnb, &size);
2838 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2839 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2844 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
2847 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2848 struct device_attribute *attr,
2851 return snprintf(buf, PAGE_SIZE, "vddnb\n");
2854 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2855 struct device_attribute *attr,
2858 struct amdgpu_device *adev = dev_get_drvdata(dev);
2860 int r, size = sizeof(u32);
2863 if (amdgpu_in_reset(adev))
2866 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2868 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2872 /* get the average GPU power */
2873 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2874 (void *)&query, &size);
2876 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2877 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2882 /* convert to microwatts */
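/* the high byte of the raw value is whole watts and the low byte is
 * treated as milliwatts, e.g. a reading of 0x2880 yields
 * 40 * 1000000 + 128 * 1000 = 40128000 uW
 */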
2883 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2885 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2888 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2889 struct device_attribute *attr,
2892 return sprintf(buf, "%i\n", 0);
2895 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2896 struct device_attribute *attr,
2899 struct amdgpu_device *adev = dev_get_drvdata(dev);
2904 if (amdgpu_in_reset(adev))
2907 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2909 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2913 if (is_support_sw_smu(adev)) {
2914 smu_get_power_limit(&adev->smu, &limit, true);
2915 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2916 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2917 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2918 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2920 size = snprintf(buf, PAGE_SIZE, "\n");
2923 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2924 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2929 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2930 struct device_attribute *attr,
2933 struct amdgpu_device *adev = dev_get_drvdata(dev);
2938 if (amdgpu_in_reset(adev))
2941 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2943 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2947 if (is_support_sw_smu(adev)) {
2948 smu_get_power_limit(&adev->smu, &limit, false);
2949 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2950 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2951 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2952 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2954 size = snprintf(buf, PAGE_SIZE, "\n");
2957 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2958 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2964 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2965 struct device_attribute *attr,
2969 struct amdgpu_device *adev = dev_get_drvdata(dev);
2973 if (amdgpu_in_reset(adev))
2976 if (amdgpu_sriov_vf(adev))
2979 err = kstrtou32(buf, 10, &value);
2983 value = value / 1000000; /* convert microwatts to watts */
2986 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2988 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2992 if (is_support_sw_smu(adev))
2993 err = smu_set_power_limit(&adev->smu, value);
2994 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
2995 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2999 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3000 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3008 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3009 struct device_attribute *attr,
3012 struct amdgpu_device *adev = dev_get_drvdata(dev);
3014 int r, size = sizeof(sclk);
3016 if (amdgpu_in_reset(adev))
3019 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3021 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3026 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3027 (void *)&sclk, &size);
3029 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3030 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
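/* the sensor reports the clock in 10 kHz units; scale to Hz for
 * hwmon, e.g. a raw value of 150000 -> 1500000000 Hz (1.5 GHz)
 */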
3035 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
3038 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3039 struct device_attribute *attr,
3042 return snprintf(buf, PAGE_SIZE, "sclk\n");
3045 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3046 struct device_attribute *attr,
3049 struct amdgpu_device *adev = dev_get_drvdata(dev);
3051 int r, size = sizeof(mclk);
3053 if (amdgpu_in_reset(adev))
3056 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3058 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3063 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3064 (void *)&mclk, &size);
3066 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3067 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
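/* as with sclk above, the raw 10 kHz value is scaled to Hz for hwmon */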
3072 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
3075 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3076 struct device_attribute *attr,
3079 return snprintf(buf, PAGE_SIZE, "mclk\n");
3085 * The amdgpu driver exposes the following sensor interfaces:
3087 * - GPU temperature (via the on-die sensor)
3091 * - Northbridge voltage (APUs only)
3097 * - GPU gfx/compute engine clock
3099 * - GPU memory clock (dGPU only)
3101 * hwmon interfaces for GPU temperature:
3103 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3104 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3106 * - temp[1-3]_label: temperature channel label
3107 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3109 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3110 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3112 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3113 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3115 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
3116 * - these are supported on SOC15 dGPUs only
3118 * hwmon interfaces for GPU voltage:
3120 * - in0_input: the voltage on the GPU in millivolts
3122 * - in1_input: the voltage on the Northbridge in millivolts
3124 * hwmon interfaces for GPU power:
3126 * - power1_average: average power used by the GPU in microWatts
3128 * - power1_cap_min: minimum cap supported in microWatts
3130 * - power1_cap_max: maximum cap supported in microWatts
3132 * - power1_cap: selected power cap in microWatts
3134 * hwmon interfaces for GPU fan:
3136 * - pwm1: pulse width modulation fan level (0-255)
3138 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3140 * - pwm1_min: pulse width modulation fan control minimum level (0)
3142 * - pwm1_max: pulse width modulation fan control maximum level (255)
3144 * - fan1_min: minimum fan speed in revolutions/min (RPM)
3146 * - fan1_max: maximum fan speed in revolutions/min (RPM)
3148 * - fan1_input: fan speed in RPM
3150 * - fan[1-\*]_target: desired fan speed in revolutions/min (RPM)
3152 * - fan[1-\*]_enable: enable or disable the sensors (1: enable, 0: disable)
3154 * hwmon interfaces for GPU clocks:
3156 * - freq1_input: the gfx/compute clock in hertz
3158 * - freq2_input: the memory clock in hertz
3160 * You can use hwmon tools like sensors to view this information on your system.
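*
* For example, reading the edge temperature and setting a manual fan
* speed from the shell (the hwmon index below is illustrative; it is
* assigned at probe time and varies between systems):
*
*     cat /sys/class/drm/card0/device/hwmon/hwmon0/temp1_input
*     echo 1 > /sys/class/drm/card0/device/hwmon/hwmon0/pwm1_enable
*     echo 128 > /sys/class/drm/card0/device/hwmon/hwmon0/pwm1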
3164 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3165 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3166 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3167 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3168 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3169 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3170 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3171 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3172 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3173 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3174 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3175 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3176 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3177 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3178 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3179 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3180 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3181 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3182 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3183 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3184 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3185 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3186 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3187 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3188 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3189 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3190 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3191 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3192 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3193 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3194 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3195 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3196 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3197 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3198 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3199 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3201 static struct attribute *hwmon_attributes[] = {
3202 &sensor_dev_attr_temp1_input.dev_attr.attr,
3203 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3204 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3205 &sensor_dev_attr_temp2_input.dev_attr.attr,
3206 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3207 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3208 &sensor_dev_attr_temp3_input.dev_attr.attr,
3209 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3210 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3211 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3212 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3213 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3214 &sensor_dev_attr_temp1_label.dev_attr.attr,
3215 &sensor_dev_attr_temp2_label.dev_attr.attr,
3216 &sensor_dev_attr_temp3_label.dev_attr.attr,
3217 &sensor_dev_attr_pwm1.dev_attr.attr,
3218 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3219 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3220 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3221 &sensor_dev_attr_fan1_input.dev_attr.attr,
3222 &sensor_dev_attr_fan1_min.dev_attr.attr,
3223 &sensor_dev_attr_fan1_max.dev_attr.attr,
3224 &sensor_dev_attr_fan1_target.dev_attr.attr,
3225 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3226 &sensor_dev_attr_in0_input.dev_attr.attr,
3227 &sensor_dev_attr_in0_label.dev_attr.attr,
3228 &sensor_dev_attr_in1_input.dev_attr.attr,
3229 &sensor_dev_attr_in1_label.dev_attr.attr,
3230 &sensor_dev_attr_power1_average.dev_attr.attr,
3231 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3232 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3233 &sensor_dev_attr_power1_cap.dev_attr.attr,
3234 &sensor_dev_attr_freq1_input.dev_attr.attr,
3235 &sensor_dev_attr_freq1_label.dev_attr.attr,
3236 &sensor_dev_attr_freq2_input.dev_attr.attr,
3237 &sensor_dev_attr_freq2_label.dev_attr.attr,
3241 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3242 struct attribute *attr, int index)
3244 struct device *dev = kobj_to_dev(kobj);
3245 struct amdgpu_device *adev = dev_get_drvdata(dev);
3246 umode_t effective_mode = attr->mode;
3248 /* under multi-VF mode, none of the hwmon attributes are supported */
3249 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3252 /* there is no fan under pp one vf mode */
3253 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3254 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3255 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3256 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3257 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3258 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3259 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3260 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3261 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3262 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3265 /* Skip fan attributes if fan is not present */
3266 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3267 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3268 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3269 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3270 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3271 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3272 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3273 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3274 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3277 /* Skip fan attributes on APU */
3278 if ((adev->flags & AMD_IS_APU) &&
3279 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3280 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3281 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3282 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3283 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3284 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3285 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3286 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3287 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3290 /* Skip crit temp on APU */
3291 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3292 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3293 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3296 /* Skip limit attributes if DPM is not enabled */
3297 if (!adev->pm.dpm_enabled &&
3298 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3299 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3300 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3301 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3302 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3303 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3304 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3305 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3306 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3307 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3308 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3311 if (!is_support_sw_smu(adev)) {
3312 /* mask fan attributes if we have no bindings for this asic to expose */
3313 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3314 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3315 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3316 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3317 effective_mode &= ~S_IRUGO;
3319 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3320 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3321 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3322 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3323 effective_mode &= ~S_IWUSR;
3326 if (((adev->flags & AMD_IS_APU) ||
3327 adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */
3328 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3329 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
3330 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
3333 if (((adev->family == AMDGPU_FAMILY_SI) ||
3334 ((adev->flags & AMD_IS_APU) &&
3335 (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
3336 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3339 if (!is_support_sw_smu(adev)) {
3340 /* hide max/min values if we can't both query and manage the fan */
3341 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3342 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3343 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3344 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3345 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3346 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3349 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3350 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3351 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3352 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3356 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3357 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3358 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3359 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3362 /* only APUs have vddnb */
3363 if (!(adev->flags & AMD_IS_APU) &&
3364 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3365 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3368 /* no mclk on APUs */
3369 if ((adev->flags & AMD_IS_APU) &&
3370 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3371 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3374 /* only SOC15 dGPUs support hotspot and mem temperatures */
3375 if (((adev->flags & AMD_IS_APU) ||
3376 adev->asic_type < CHIP_VEGA10) &&
3377 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3378 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3379 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3380 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3381 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3382 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3383 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3384 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3385 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3386 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3387 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3390 return effective_mode;
3393 static const struct attribute_group hwmon_attrgroup = {
3394 .attrs = hwmon_attributes,
3395 .is_visible = hwmon_attributes_visible,
3398 static const struct attribute_group *hwmon_groups[] = {
3403 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3408 if (adev->pm.sysfs_initialized)
3411 if (adev->pm.dpm_enabled == 0)
3414 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3416 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3419 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3420 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3422 "Unable to register hwmon device: %d\n", ret);
3426 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3427 case SRIOV_VF_MODE_ONE_VF:
3428 mask = ATTR_FLAG_ONEVF;
3430 case SRIOV_VF_MODE_MULTI_VF:
3433 case SRIOV_VF_MODE_BARE_METAL:
3435 mask = ATTR_FLAG_MASK_ALL;
3439 ret = amdgpu_device_attr_create_groups(adev,
3440 amdgpu_device_attrs,
3441 ARRAY_SIZE(amdgpu_device_attrs),
3443 &adev->pm.pm_attr_list);
3447 adev->pm.sysfs_initialized = true;
3452 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3454 if (adev->pm.dpm_enabled == 0)
3457 if (adev->pm.int_hwmon_dev)
3458 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3460 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3466 #if defined(CONFIG_DEBUG_FS)
3468 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3471 uint64_t value64 = 0;
3476 size = sizeof(value);
3477 seq_printf(m, "GFX Clocks and Power:\n");
3478 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3479 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3480 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3481 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3482 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3483 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3484 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3485 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3486 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3487 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3488 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3489 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3490 size = sizeof(uint32_t);
3491 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3492 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3493 size = sizeof(value);
3494 seq_printf(m, "\n");
3497 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3498 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3501 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3502 seq_printf(m, "GPU Load: %u %%\n", value);
3504 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3505 seq_printf(m, "MEM Load: %u %%\n", value);
3507 seq_printf(m, "\n");
3509 /* SMC feature mask */
3510 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3511 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3513 if (adev->asic_type > CHIP_VEGA20) {
3515 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3517 seq_printf(m, "VCN: Disabled\n");
3519 seq_printf(m, "VCN: Enabled\n");
3520 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3521 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3522 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3523 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3526 seq_printf(m, "\n");
3529 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3531 seq_printf(m, "UVD: Disabled\n");
3533 seq_printf(m, "UVD: Enabled\n");
3534 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3535 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3536 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3537 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3540 seq_printf(m, "\n");
3543 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3545 seq_printf(m, "VCE: Disabled\n");
3547 seq_printf(m, "VCE: Enabled\n");
3548 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3549 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3557 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3561 for (i = 0; clocks[i].flag; i++)
3562 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3563 (flags & clocks[i].flag) ? "On" : "Off");
3566 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3568 struct drm_info_node *node = (struct drm_info_node *) m->private;
3569 struct drm_device *dev = node->minor->dev;
3570 struct amdgpu_device *adev = drm_to_adev(dev);
3574 if (amdgpu_in_reset(adev))
3577 r = pm_runtime_get_sync(dev->dev);
3579 pm_runtime_put_autosuspend(dev->dev);
3583 if (!adev->pm.dpm_enabled) {
3584 seq_printf(m, "dpm not enabled\n");
3585 pm_runtime_mark_last_busy(dev->dev);
3586 pm_runtime_put_autosuspend(dev->dev);
3590 if (!is_support_sw_smu(adev) &&
3591 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3592 mutex_lock(&adev->pm.mutex);
3593 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3594 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3596 seq_printf(m, "Debugfs support not implemented for this asic\n");
3597 mutex_unlock(&adev->pm.mutex);
3600 r = amdgpu_debugfs_pm_info_pp(m, adev);
3605 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3607 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3608 amdgpu_parse_cg_state(m, flags);
3609 seq_printf(m, "\n");
3612 pm_runtime_mark_last_busy(dev->dev);
3613 pm_runtime_put_autosuspend(dev->dev);
3618 static const struct drm_info_list amdgpu_pm_info_list[] = {
3619 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3623 int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3625 #if defined(CONFIG_DEBUG_FS)
3626 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));