1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Rafał Miłecki <[email protected]>
23  *          Alex Deucher <[email protected]>
24  */
25
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "atom.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <asm/processor.h>
37
38 #define MAX_NUM_OF_FEATURES_PER_SUBSET          8
39 #define MAX_NUM_OF_SUBSETS                      8
40
41 #define DEVICE_ATTR_IS(_name)           (attr_id == device_attr_id__##_name)
42
43 struct od_attribute {
44         struct kobj_attribute   attribute;
45         struct list_head        entry;
46 };
47
48 struct od_kobj {
49         struct kobject          kobj;
50         struct list_head        entry;
51         struct list_head        attribute;
52         void                    *priv;
53 };
54
55 struct od_feature_ops {
56         umode_t (*is_visible)(struct amdgpu_device *adev);
57         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
58                         char *buf);
59         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
60                          const char *buf, size_t count);
61 };
62
63 struct od_feature_item {
64         const char              *name;
65         struct od_feature_ops   ops;
66 };
67
68 struct od_feature_container {
69         char                            *name;
70         struct od_feature_ops           ops;
71         struct od_feature_item          sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
72 };
73
74 struct od_feature_set {
75         struct od_feature_container     containers[MAX_NUM_OF_SUBSETS];
76 };
77
78 static const struct hwmon_temp_label {
79         enum PP_HWMON_TEMP channel;
80         const char *label;
81 } temp_label[] = {
82         {PP_TEMP_EDGE, "edge"},
83         {PP_TEMP_JUNCTION, "junction"},
84         {PP_TEMP_MEM, "mem"},
85 };
86
87 const char * const amdgpu_pp_profile_name[] = {
88         "BOOTUP_DEFAULT",
89         "3D_FULL_SCREEN",
90         "POWER_SAVING",
91         "VIDEO",
92         "VR",
93         "COMPUTE",
94         "CUSTOM",
95         "WINDOW_3D",
96         "CAPPED",
97         "UNCAPPED",
98 };
99
100 /**
101  * DOC: power_dpm_state
102  *
103  * The power_dpm_state file is a legacy interface and is only provided for
104  * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
105  * certain power-related parameters.  The file power_dpm_state is used for this.
106  * It accepts the following arguments:
107  *
108  * - battery
109  *
110  * - balanced
111  *
112  * - performance
113  *
114  * battery
115  *
116  * On older GPUs, the vbios provided a special power state for battery
117  * operation.  Selecting battery switched to this state.  This is no
118  * longer provided on newer GPUs so the option does nothing in that case.
119  *
120  * balanced
121  *
122  * On older GPUs, the vbios provided a special power state for balanced
123  * operation.  Selecting balanced switched to this state.  This is no
124  * longer provided on newer GPUs so the option does nothing in that case.
125  *
126  * performance
127  *
128  * On older GPUs, the vbios provided a special power state for performance
129  * operation.  Selecting performance switched to this state.  This is no
130  * longer provided on newer GPUs so the option does nothing in that case.
131  *
132  */
133
134 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
135                                           struct device_attribute *attr,
136                                           char *buf)
137 {
138         struct drm_device *ddev = dev_get_drvdata(dev);
139         struct amdgpu_device *adev = drm_to_adev(ddev);
140         enum amd_pm_state_type pm;
141         int ret;
142
143         if (amdgpu_in_reset(adev))
144                 return -EPERM;
145         if (adev->in_suspend && !adev->in_runpm)
146                 return -EPERM;
147
148         ret = pm_runtime_get_if_active(ddev->dev);
149         if (ret <= 0)
150                 return ret ?: -EPERM;
151
152         amdgpu_dpm_get_current_power_state(adev, &pm);
153
154         pm_runtime_put_autosuspend(ddev->dev);
155
156         return sysfs_emit(buf, "%s\n",
157                           (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
158                           (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
159 }
160
161 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
162                                           struct device_attribute *attr,
163                                           const char *buf,
164                                           size_t count)
165 {
166         struct drm_device *ddev = dev_get_drvdata(dev);
167         struct amdgpu_device *adev = drm_to_adev(ddev);
168         enum amd_pm_state_type  state;
169         int ret;
170
171         if (amdgpu_in_reset(adev))
172                 return -EPERM;
173         if (adev->in_suspend && !adev->in_runpm)
174                 return -EPERM;
175
176         if (strncmp("battery", buf, strlen("battery")) == 0)
177                 state = POWER_STATE_TYPE_BATTERY;
178         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
179                 state = POWER_STATE_TYPE_BALANCED;
180         else if (strncmp("performance", buf, strlen("performance")) == 0)
181                 state = POWER_STATE_TYPE_PERFORMANCE;
182         else
183                 return -EINVAL;
184
185         ret = pm_runtime_resume_and_get(ddev->dev);
186         if (ret < 0)
187                 return ret;
188
189         amdgpu_dpm_set_power_state(adev, state);
190
191         pm_runtime_mark_last_busy(ddev->dev);
192         pm_runtime_put_autosuspend(ddev->dev);
193
194         return count;
195 }
196
197
198 /**
199  * DOC: power_dpm_force_performance_level
200  *
201  * The amdgpu driver provides a sysfs API for adjusting certain
202  * power-related parameters.  The file power_dpm_force_performance_level is
203  * used for this.  It accepts the following arguments:
204  *
205  * - auto
206  *
207  * - low
208  *
209  * - high
210  *
211  * - manual
212  *
213  * - profile_standard
214  *
215  * - profile_min_sclk
216  *
217  * - profile_min_mclk
218  *
219  * - profile_peak
220  *
221  * auto
222  *
223  * When auto is selected, the driver will attempt to dynamically select
224  * the optimal power profile for current conditions in the driver.
225  *
226  * low
227  *
228  * When low is selected, the clocks are forced to the lowest power state.
229  *
230  * high
231  *
232  * When high is selected, the clocks are forced to the highest power state.
233  *
234  * manual
235  *
236  * When manual is selected, the user can manually adjust which power states
237  * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
238  * and pp_dpm_pcie files and adjust the power state transition heuristics
239  * via the pp_power_profile_mode sysfs file.
240  *
241  * profile_standard
242  * profile_min_sclk
243  * profile_min_mclk
244  * profile_peak
245  *
246  * When the profiling modes are selected, clock and power gating are
247  * disabled and the clocks are set for different profiling cases. This
248  * mode is recommended for profiling specific workloads where you do
249  * not want clock or power gating, or clock fluctuation, to interfere
250  * with your results. profile_standard sets the clocks to a fixed clock
251  * level which varies from asic to asic.  profile_min_sclk forces the sclk
252  * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
253  * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
254  *
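 * Example usage (illustrative; card0 stands in for whichever card index
 * the system assigns):
 *
 * .. code-block:: bash
 *
 *      # switch to manual so pp_dpm_* and pp_power_profile_mode take effect
 *      echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *      # later, hand clock selection back to the driver
 *      echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level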
255  */
256
257 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
258                                                             struct device_attribute *attr,
259                                                             char *buf)
260 {
261         struct drm_device *ddev = dev_get_drvdata(dev);
262         struct amdgpu_device *adev = drm_to_adev(ddev);
263         enum amd_dpm_forced_level level = 0xff;
264         int ret;
265
266         if (amdgpu_in_reset(adev))
267                 return -EPERM;
268         if (adev->in_suspend && !adev->in_runpm)
269                 return -EPERM;
270
271         ret = pm_runtime_get_if_active(ddev->dev);
272         if (ret <= 0)
273                 return ret ?: -EPERM;
274
275         level = amdgpu_dpm_get_performance_level(adev);
276
277         pm_runtime_put_autosuspend(ddev->dev);
278
279         return sysfs_emit(buf, "%s\n",
280                           (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
281                           (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
282                           (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
283                           (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
284                           (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
285                           (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
286                           (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
287                           (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
288                           (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
289                           "unknown");
290 }
291
292 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
293                                                             struct device_attribute *attr,
294                                                             const char *buf,
295                                                             size_t count)
296 {
297         struct drm_device *ddev = dev_get_drvdata(dev);
298         struct amdgpu_device *adev = drm_to_adev(ddev);
299         enum amd_dpm_forced_level level;
300         int ret = 0;
301
302         if (amdgpu_in_reset(adev))
303                 return -EPERM;
304         if (adev->in_suspend && !adev->in_runpm)
305                 return -EPERM;
306
307         if (strncmp("low", buf, strlen("low")) == 0) {
308                 level = AMD_DPM_FORCED_LEVEL_LOW;
309         } else if (strncmp("high", buf, strlen("high")) == 0) {
310                 level = AMD_DPM_FORCED_LEVEL_HIGH;
311         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
312                 level = AMD_DPM_FORCED_LEVEL_AUTO;
313         } else if (strncmp("manual", buf, strlen("manual")) == 0) {
314                 level = AMD_DPM_FORCED_LEVEL_MANUAL;
315         } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
316                 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
317         } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
318                 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
319         } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
320                 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
321         } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
322                 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
323         } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
324                 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
325         } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
326                 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
327  } else {
328                 return -EINVAL;
329         }
330
331         ret = pm_runtime_resume_and_get(ddev->dev);
332         if (ret < 0)
333                 return ret;
334
335         mutex_lock(&adev->pm.stable_pstate_ctx_lock);
336         if (amdgpu_dpm_force_performance_level(adev, level)) {
337                 pm_runtime_mark_last_busy(ddev->dev);
338                 pm_runtime_put_autosuspend(ddev->dev);
339                 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
340                 return -EINVAL;
341         }
342         /* override whatever a user ctx may have set */
343         adev->pm.stable_pstate_ctx = NULL;
344         mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
345
346         pm_runtime_mark_last_busy(ddev->dev);
347         pm_runtime_put_autosuspend(ddev->dev);
348
349         return count;
350 }
351
352 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
353                 struct device_attribute *attr,
354                 char *buf)
355 {
356         struct drm_device *ddev = dev_get_drvdata(dev);
357         struct amdgpu_device *adev = drm_to_adev(ddev);
358         struct pp_states_info data;
359         uint32_t i;
360         int buf_len, ret;
361
362         if (amdgpu_in_reset(adev))
363                 return -EPERM;
364         if (adev->in_suspend && !adev->in_runpm)
365                 return -EPERM;
366
367         ret = pm_runtime_get_if_active(ddev->dev);
368         if (ret <= 0)
369                 return ret ?: -EPERM;
370
371         if (amdgpu_dpm_get_pp_num_states(adev, &data))
372                 memset(&data, 0, sizeof(data));
373
374         pm_runtime_put_autosuspend(ddev->dev);
375
376         buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
377         for (i = 0; i < data.nums; i++)
378                 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
379                                 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
380                                 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
381                                 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
382                                 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
383
384         return buf_len;
385 }
386
387 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
388                 struct device_attribute *attr,
389                 char *buf)
390 {
391         struct drm_device *ddev = dev_get_drvdata(dev);
392         struct amdgpu_device *adev = drm_to_adev(ddev);
393         struct pp_states_info data = {0};
394         enum amd_pm_state_type pm = 0;
395         int i = 0, ret = 0;
396
397         if (amdgpu_in_reset(adev))
398                 return -EPERM;
399         if (adev->in_suspend && !adev->in_runpm)
400                 return -EPERM;
401
402         ret = pm_runtime_get_if_active(ddev->dev);
403         if (ret <= 0)
404                 return ret ?: -EPERM;
405
406         amdgpu_dpm_get_current_power_state(adev, &pm);
407
408         ret = amdgpu_dpm_get_pp_num_states(adev, &data);
409
410         pm_runtime_put_autosuspend(ddev->dev);
411
412         if (ret)
413                 return ret;
414
415         for (i = 0; i < data.nums; i++) {
416                 if (pm == data.states[i])
417                         break;
418         }
419
420         if (i == data.nums)
421                 i = -EINVAL;
422
423         return sysfs_emit(buf, "%d\n", i);
424 }
425
426 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
427                 struct device_attribute *attr,
428                 char *buf)
429 {
430         struct drm_device *ddev = dev_get_drvdata(dev);
431         struct amdgpu_device *adev = drm_to_adev(ddev);
432
433         if (amdgpu_in_reset(adev))
434                 return -EPERM;
435         if (adev->in_suspend && !adev->in_runpm)
436                 return -EPERM;
437
438         if (adev->pm.pp_force_state_enabled)
439                 return amdgpu_get_pp_cur_state(dev, attr, buf);
440         else
441                 return sysfs_emit(buf, "\n");
442 }
443
444 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
445                 struct device_attribute *attr,
446                 const char *buf,
447                 size_t count)
448 {
449         struct drm_device *ddev = dev_get_drvdata(dev);
450         struct amdgpu_device *adev = drm_to_adev(ddev);
451         enum amd_pm_state_type state = 0;
452         struct pp_states_info data;
453         unsigned long idx;
454         int ret;
455
456         if (amdgpu_in_reset(adev))
457                 return -EPERM;
458         if (adev->in_suspend && !adev->in_runpm)
459                 return -EPERM;
460
461         adev->pm.pp_force_state_enabled = false;
462
463         if (strlen(buf) == 1)
464                 return count;
465
466         ret = kstrtoul(buf, 0, &idx);
467         if (ret || idx >= ARRAY_SIZE(data.states))
468                 return -EINVAL;
469
470         idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
471
472         ret = pm_runtime_resume_and_get(ddev->dev);
473         if (ret < 0)
474                 return ret;
475
476         ret = amdgpu_dpm_get_pp_num_states(adev, &data);
477         if (ret)
478                 goto err_out;
479
480         state = data.states[idx];
481
482         /* only set user selected power states */
483         if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
484             state != POWER_STATE_TYPE_DEFAULT) {
485                 ret = amdgpu_dpm_dispatch_task(adev,
486                                 AMD_PP_TASK_ENABLE_USER_STATE, &state);
487                 if (ret)
488                         goto err_out;
489
490                 adev->pm.pp_force_state_enabled = true;
491         }
492
493         pm_runtime_mark_last_busy(ddev->dev);
494         pm_runtime_put_autosuspend(ddev->dev);
495
496         return count;
497
498 err_out:
499         pm_runtime_mark_last_busy(ddev->dev);
500         pm_runtime_put_autosuspend(ddev->dev);
501         return ret;
502 }
503
504 /**
505  * DOC: pp_table
506  *
507  * The amdgpu driver provides a sysfs API for uploading new powerplay
508  * tables.  The file pp_table is used for this.  Reading the file
509  * will dump the current power play table.  Writing to the file
510  * will attempt to upload a new powerplay table and re-initialize
511  * powerplay using that new table.
512  *
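 * Example usage (a sketch; paths are illustrative, and the table format
 * is ASIC-specific, so only write back tables obtained from a compatible
 * GPU):
 *
 * .. code-block:: bash
 *
 *      # dump the current powerplay table
 *      cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
 *      # upload a (modified) table and re-initialize powerplay with it
 *      cat /tmp/pp_table > /sys/class/drm/card0/device/pp_table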
513  */
514
515 static ssize_t amdgpu_get_pp_table(struct device *dev,
516                 struct device_attribute *attr,
517                 char *buf)
518 {
519         struct drm_device *ddev = dev_get_drvdata(dev);
520         struct amdgpu_device *adev = drm_to_adev(ddev);
521         char *table = NULL;
522         int size, ret;
523
524         if (amdgpu_in_reset(adev))
525                 return -EPERM;
526         if (adev->in_suspend && !adev->in_runpm)
527                 return -EPERM;
528
529         ret = pm_runtime_get_if_active(ddev->dev);
530         if (ret <= 0)
531                 return ret ?: -EPERM;
532
533         size = amdgpu_dpm_get_pp_table(adev, &table);
534
535         pm_runtime_put_autosuspend(ddev->dev);
536
537         if (size <= 0)
538                 return size;
539
540         if (size >= PAGE_SIZE)
541                 size = PAGE_SIZE - 1;
542
543         memcpy(buf, table, size);
544
545         return size;
546 }
547
548 static ssize_t amdgpu_set_pp_table(struct device *dev,
549                 struct device_attribute *attr,
550                 const char *buf,
551                 size_t count)
552 {
553         struct drm_device *ddev = dev_get_drvdata(dev);
554         struct amdgpu_device *adev = drm_to_adev(ddev);
555         int ret = 0;
556
557         if (amdgpu_in_reset(adev))
558                 return -EPERM;
559         if (adev->in_suspend && !adev->in_runpm)
560                 return -EPERM;
561
562         ret = pm_runtime_resume_and_get(ddev->dev);
563         if (ret < 0)
564                 return ret;
565
566         ret = amdgpu_dpm_set_pp_table(adev, buf, count);
567
568         pm_runtime_mark_last_busy(ddev->dev);
569         pm_runtime_put_autosuspend(ddev->dev);
570
571         if (ret)
572                 return ret;
573
574         return count;
575 }
576
577 /**
578  * DOC: pp_od_clk_voltage
579  *
580  * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
581  * in each power level within a power state.  The file pp_od_clk_voltage is
582  * used for this.
583  *
584  * Note that the actual memory controller clock rate is exposed, not
585  * the effective memory clock of the DRAMs. To translate it, use the
586  * following formula:
587  *
588  * Clock conversion (MHz):
589  *
590  * HBM: effective_memory_clock = memory_controller_clock * 1
591  *
592  * G5: effective_memory_clock = memory_controller_clock * 1
593  *
594  * G6: effective_memory_clock = memory_controller_clock * 2
595  *
596  * DRAM data rate (MT/s):
597  *
598  * HBM: effective_memory_clock * 2 = data_rate
599  *
600  * G5: effective_memory_clock * 4 = data_rate
601  *
602  * G6: effective_memory_clock * 8 = data_rate
603  *
604  * Bandwidth (MB/s):
605  *
606  * data_rate * vram_bit_width / 8 = memory_bandwidth
607  *
608  * Some examples:
609  *
610  * G5 on RX460:
611  *
612  * memory_controller_clock = 1750 MHz
613  *
614  * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
615  *
616  * data rate = 1750 * 4 = 7000 MT/s
617  *
618  * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
619  *
620  * G6 on RX5700:
621  *
622  * memory_controller_clock = 875 MHz
623  *
624  * effective_memory_clock = 875 MHz * 2 = 1750 MHz
625  *
626  * data rate = 1750 * 8 = 14000 MT/s
627  *
628  * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
629  *
630  * < For Vega10 and previous ASICs >
631  *
632  * Reading the file will display:
633  *
634  * - a list of engine clock levels and voltages labeled OD_SCLK
635  *
636  * - a list of memory clock levels and voltages labeled OD_MCLK
637  *
638  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
639  *
640  * To manually adjust these settings, first select manual using
641  * power_dpm_force_performance_level. Enter a new value for each
642  * level by writing a string that contains "s/m level clock voltage" to
643  * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
644  * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
645  * 810 mV.  When you have edited all of the states as needed, write
646  * "c" (commit) to the file to commit your changes.  If you want to reset to the
647  * default power levels, write "r" (reset) to the file to reset them.
648  *
649  *
650  * < For Vega20 and newer ASICs >
651  *
652  * Reading the file will display:
653  *
654  * - minimum and maximum engine clock labeled OD_SCLK
655  *
656  * - minimum (not available for Vega20 and Navi1x) and maximum memory
657  *   clock labeled OD_MCLK
658  *
659  * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
660  *   They can be used to calibrate the sclk voltage curve. This is
661  *   available for Vega20 and NV1X.
662  *
663  * - voltage offset (in mV) applied to the target voltage calculation.
664  *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
665  *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
666  *   voltage calculation can be illustrated by "voltage = voltage
667  *   calculated from v/f curve + overdrive vddgfx offset"
668  *
669  * - a list of valid ranges for sclk, mclk, voltage curve points
670  *   or voltage offset labeled OD_RANGE
671  *
672  * < For APUs >
673  *
674  * Reading the file will display:
675  *
676  * - minimum and maximum engine clock labeled OD_SCLK
677  *
678  * - a list of valid ranges for sclk labeled OD_RANGE
679  *
680  * < For VanGogh >
681  *
682  * Reading the file will display:
683  *
684  * - minimum and maximum engine clock labeled OD_SCLK
685  * - minimum and maximum core clocks labeled OD_CCLK
686  *
687  * - a list of valid ranges for sclk and cclk labeled OD_RANGE
688  *
689  * To manually adjust these settings:
690  *
691  * - First select manual using power_dpm_force_performance_level
692  *
693  * - For clock frequency setting, enter a new value by writing a
694  *   string that contains "s/m index clock" to the file. The index
695  *   should be 0 to set the minimum clock and 1 to set the maximum
696  *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
697  *   "m 1 800" will update the maximum mclk to be 800 MHz. For core
698  *   clocks on VanGogh, the string contains "p core index clock".
699  *   E.g., "p 2 0 800" would set the minimum core clock on core
700  *   2 to 800 MHz.
701  *
702  *   For the sclk voltage curve supported by Vega20 and NV1X, enter the
703  *   new values by writing a string that contains "vc point clock voltage"
704  *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
705  *   600" will update the first point with clock set as 300 MHz and
706  *   voltage as 600 mV. "vc 2 1000 1000" will update the third point with
707  *   clock set as 1000 MHz and voltage 1000 mV.
708  *
709  *   For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
710  *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
711  *   string that contains "vo offset". E.g., "vo -10" will update the extra
712  *   voltage offset applied to the whole v/f curve line as -10 mV.
713  *
714  * - When you have edited all of the states as needed, write "c" (commit)
715  *   to the file to commit your changes
716  *
717  * - If you want to reset to the default power levels, write "r" (reset)
718  *   to the file to reset them
719  *
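 * Example usage (illustrative values for a Vega10-style ASIC; the legal
 * values are those reported under OD_RANGE on your particular board):
 *
 * .. code-block:: bash
 *
 *      cd /sys/class/drm/card0/device
 *      echo manual > power_dpm_force_performance_level
 *      echo "s 1 500 820" > pp_od_clk_voltage   # sclk level 1: 500 MHz @ 820 mV
 *      echo "m 0 350 810" > pp_od_clk_voltage   # mclk level 0: 350 MHz @ 810 mV
 *      echo "c" > pp_od_clk_voltage             # commit the edits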
720  */
721
722 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
723                 struct device_attribute *attr,
724                 const char *buf,
725                 size_t count)
726 {
727         struct drm_device *ddev = dev_get_drvdata(dev);
728         struct amdgpu_device *adev = drm_to_adev(ddev);
729         int ret;
730         uint32_t parameter_size = 0;
731         long parameter[64];
732         char buf_cpy[128];
733         char *tmp_str;
734         char *sub_str;
735         const char delimiter[3] = {' ', '\n', '\0'};
736         uint32_t type;
737
738         if (amdgpu_in_reset(adev))
739                 return -EPERM;
740         if (adev->in_suspend && !adev->in_runpm)
741                 return -EPERM;
742
743         if (count > 127 || count == 0)
744                 return -EINVAL;
745
746         if (*buf == 's')
747                 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
748         else if (*buf == 'p')
749                 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
750         else if (*buf == 'm')
751                 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
752         else if (*buf == 'r')
753                 type = PP_OD_RESTORE_DEFAULT_TABLE;
754         else if (*buf == 'c')
755                 type = PP_OD_COMMIT_DPM_TABLE;
756         else if (!strncmp(buf, "vc", 2))
757                 type = PP_OD_EDIT_VDDC_CURVE;
758         else if (!strncmp(buf, "vo", 2))
759                 type = PP_OD_EDIT_VDDGFX_OFFSET;
760         else
761                 return -EINVAL;
762
763         memcpy(buf_cpy, buf, count);
764         buf_cpy[count] = 0;
765
766         tmp_str = buf_cpy;
767
768         if ((type == PP_OD_EDIT_VDDC_CURVE) ||
769              (type == PP_OD_EDIT_VDDGFX_OFFSET))
770                 tmp_str++;
771         while (isspace(*++tmp_str));
772
773         while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
774                 if (strlen(sub_str) == 0)
775                         continue;
776                 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
777                 if (ret)
778                         return -EINVAL;
779                 parameter_size++;
780
781                 if (!tmp_str)
782                         break;
783
784                 while (isspace(*tmp_str))
785                         tmp_str++;
786         }
787
788         ret = pm_runtime_resume_and_get(ddev->dev);
789         if (ret < 0)
790                 return ret;
791
792         if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
793                                               type,
794                                               parameter,
795                                               parameter_size))
796                 goto err_out;
797
798         if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
799                                           parameter, parameter_size))
800                 goto err_out;
801
802         if (type == PP_OD_COMMIT_DPM_TABLE) {
803                 if (amdgpu_dpm_dispatch_task(adev,
804                                              AMD_PP_TASK_READJUST_POWER_STATE,
805                                              NULL))
806                         goto err_out;
807         }
808
809         pm_runtime_mark_last_busy(ddev->dev);
810         pm_runtime_put_autosuspend(ddev->dev);
811
812         return count;
813
814 err_out:
815         pm_runtime_mark_last_busy(ddev->dev);
816         pm_runtime_put_autosuspend(ddev->dev);
817         return -EINVAL;
818 }
819
820 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
821                 struct device_attribute *attr,
822                 char *buf)
823 {
824         struct drm_device *ddev = dev_get_drvdata(dev);
825         struct amdgpu_device *adev = drm_to_adev(ddev);
826         int size = 0;
827         int ret;
828         enum pp_clock_type od_clocks[6] = {
829                 OD_SCLK,
830                 OD_MCLK,
831                 OD_VDDC_CURVE,
832                 OD_RANGE,
833                 OD_VDDGFX_OFFSET,
834                 OD_CCLK,
835         };
836         uint clk_index;
837
838         if (amdgpu_in_reset(adev))
839                 return -EPERM;
840         if (adev->in_suspend && !adev->in_runpm)
841                 return -EPERM;
842
843         ret = pm_runtime_get_if_active(ddev->dev);
844         if (ret <= 0)
845                 return ret ?: -EPERM;
846
847         for (clk_index = 0; clk_index < 6; clk_index++) {
848                 ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
849                 if (ret)
850                         break;
851         }
852         if (ret == -ENOENT) {
853                 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
854                 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
855                 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
856                 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
857                 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
858                 size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
859         }
860
861         if (size == 0)
862                 size = sysfs_emit(buf, "\n");
863
864         pm_runtime_put_autosuspend(ddev->dev);
865
866         return size;
867 }
868
869 /**
870  * DOC: pp_features
871  *
872  * The amdgpu driver provides a sysfs API for adjusting which powerplay
873  * features are enabled. The file pp_features is used for this, and it
874  * is only available for Vega10 and later dGPUs.
875  *
876  * Reading back the file will show you the following:
877  * - Current ppfeature masks
878  * - List of all the supported powerplay features with their naming,
879  *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
880  *
881  * To manually enable or disable a specific feature, just set or clear
882  * the corresponding bit in the original ppfeature mask and write the
883  * new ppfeature mask to the file.
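 *
 * Example usage (a sketch; the mask value below is arbitrary, and the
 * meaning of each bit is ASIC-specific, so derive the new mask from the
 * one you read back):
 *
 * .. code-block:: bash
 *
 *      # show the current mask and the per-feature bit assignments
 *      cat /sys/class/drm/card0/device/pp_features
 *      # write back a modified mask
 *      echo 0x7fffffff > /sys/class/drm/card0/device/pp_features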
884  */
885 static ssize_t amdgpu_set_pp_features(struct device *dev,
886                                       struct device_attribute *attr,
887                                       const char *buf,
888                                       size_t count)
889 {
890         struct drm_device *ddev = dev_get_drvdata(dev);
891         struct amdgpu_device *adev = drm_to_adev(ddev);
892         uint64_t featuremask;
893         int ret;
894
895         if (amdgpu_in_reset(adev))
896                 return -EPERM;
897         if (adev->in_suspend && !adev->in_runpm)
898                 return -EPERM;
899
900         ret = kstrtou64(buf, 0, &featuremask);
901         if (ret)
902                 return -EINVAL;
903
904         ret = pm_runtime_resume_and_get(ddev->dev);
905         if (ret < 0)
906                 return ret;
907
908         ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
909
910         pm_runtime_mark_last_busy(ddev->dev);
911         pm_runtime_put_autosuspend(ddev->dev);
912
913         if (ret)
914                 return -EINVAL;
915
916         return count;
917 }
918
919 static ssize_t amdgpu_get_pp_features(struct device *dev,
920                                       struct device_attribute *attr,
921                                       char *buf)
922 {
923         struct drm_device *ddev = dev_get_drvdata(dev);
924         struct amdgpu_device *adev = drm_to_adev(ddev);
925         ssize_t size;
926         int ret;
927
928         if (amdgpu_in_reset(adev))
929                 return -EPERM;
930         if (adev->in_suspend && !adev->in_runpm)
931                 return -EPERM;
932
933         ret = pm_runtime_get_if_active(ddev->dev);
934         if (ret <= 0)
935                 return ret ?: -EPERM;
936
937         size = amdgpu_dpm_get_ppfeature_status(adev, buf);
938         if (size <= 0)
939                 size = sysfs_emit(buf, "\n");
940
941         pm_runtime_put_autosuspend(ddev->dev);
942
943         return size;
944 }
945
946 /**
947  * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
948  *
949  * The amdgpu driver provides a sysfs API for adjusting what power levels
950  * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
951  * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
952  * this.
953  *
954  * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
955  * Vega10 and later ASICs.
956  * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
957  *
958  * Reading back the files will show you the available power levels within
959  * the power state and the clock information for those levels. If deep sleep is
960  * applied to a clock, the level will be denoted by a special level 'S:'.
961  * E.g., ::
962  *
963  *  S: 19Mhz *
964  *  0: 615Mhz
965  *  1: 800Mhz
966  *  2: 888Mhz
967  *  3: 1000Mhz
968  *
969  *
970  * To manually adjust these states, first select manual using
971  * power_dpm_force_performance_level.
972  * Secondly, enter a new value for each level by writing a string that
973  * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
974  * E.g.,
975  *
976  * .. code-block:: bash
977  *
978  *      echo "4 5 6" > pp_dpm_sclk
979  *
980  * will enable sclk levels 4, 5, and 6.
981  *
982  * NOTE: changing the dcefclk max dpm level is not supported at this time.
983  */
984
985 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
986                 enum pp_clock_type type,
987                 char *buf)
988 {
989         struct drm_device *ddev = dev_get_drvdata(dev);
990         struct amdgpu_device *adev = drm_to_adev(ddev);
991         int size = 0;
992         int ret = 0;
993
994         if (amdgpu_in_reset(adev))
995                 return -EPERM;
996         if (adev->in_suspend && !adev->in_runpm)
997                 return -EPERM;
998
999         ret = pm_runtime_get_if_active(ddev->dev);
1000         if (ret <= 0)
1001                 return ret ?: -EPERM;
1002
1003         ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1004         if (ret == -ENOENT)
1005                 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1006
1007         if (size == 0)
1008                 size = sysfs_emit(buf, "\n");
1009
1010         pm_runtime_put_autosuspend(ddev->dev);
1011
1012         return size;
1013 }
1014
1015 /*
1016  * Worst case: 32 bits individually specified, in octal at 12 characters
1017  * per line (+1 for \n).
1018  */
1019 #define AMDGPU_MASK_BUF_MAX     (32 * 13)
1020
1021 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1022 {
1023         int ret;
1024         unsigned long level;
1025         char *sub_str = NULL;
1026         char *tmp;
1027         char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1028         const char delimiter[3] = {' ', '\n', '\0'};
1029         size_t bytes;
1030
1031         *mask = 0;
1032
1033         bytes = min(count, sizeof(buf_cpy) - 1);
1034         memcpy(buf_cpy, buf, bytes);
1035         buf_cpy[bytes] = '\0';
1036         tmp = buf_cpy;
1037         while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1038                 if (strlen(sub_str)) {
1039                         ret = kstrtoul(sub_str, 0, &level);
1040                         if (ret || level > 31)
1041                                 return -EINVAL;
1042                         *mask |= 1 << level;
1043                 } else
1044                         break;
1045         }
1046
1047         return 0;
1048 }
1049
1050 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1051                 enum pp_clock_type type,
1052                 const char *buf,
1053                 size_t count)
1054 {
1055         struct drm_device *ddev = dev_get_drvdata(dev);
1056         struct amdgpu_device *adev = drm_to_adev(ddev);
1057         int ret;
1058         uint32_t mask = 0;
1059
1060         if (amdgpu_in_reset(adev))
1061                 return -EPERM;
1062         if (adev->in_suspend && !adev->in_runpm)
1063                 return -EPERM;
1064
1065         ret = amdgpu_read_mask(buf, count, &mask);
1066         if (ret)
1067                 return ret;
1068
1069         ret = pm_runtime_resume_and_get(ddev->dev);
1070         if (ret < 0)
1071                 return ret;
1072
1073         ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1074
1075         pm_runtime_mark_last_busy(ddev->dev);
1076         pm_runtime_put_autosuspend(ddev->dev);
1077
1078         if (ret)
1079                 return -EINVAL;
1080
1081         return count;
1082 }
1083
1084 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1085                 struct device_attribute *attr,
1086                 char *buf)
1087 {
1088         return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1089 }
1090
1091 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1092                 struct device_attribute *attr,
1093                 const char *buf,
1094                 size_t count)
1095 {
1096         return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1097 }
1098
1099 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1100                 struct device_attribute *attr,
1101                 char *buf)
1102 {
1103         return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1104 }
1105
1106 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1107                 struct device_attribute *attr,
1108                 const char *buf,
1109                 size_t count)
1110 {
1111         return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1112 }
1113
1114 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1115                 struct device_attribute *attr,
1116                 char *buf)
1117 {
1118         return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1119 }
1120
1121 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1122                 struct device_attribute *attr,
1123                 const char *buf,
1124                 size_t count)
1125 {
1126         return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1127 }
1128
1129 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1130                 struct device_attribute *attr,
1131                 char *buf)
1132 {
1133         return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1134 }
1135
1136 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1137                 struct device_attribute *attr,
1138                 const char *buf,
1139                 size_t count)
1140 {
1141         return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1142 }
1143
1144 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1145                 struct device_attribute *attr,
1146                 char *buf)
1147 {
1148         return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1149 }
1150
1151 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1152                 struct device_attribute *attr,
1153                 const char *buf,
1154                 size_t count)
1155 {
1156         return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1157 }
1158
1159 static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1160                 struct device_attribute *attr,
1161                 char *buf)
1162 {
1163         return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1164 }
1165
1166 static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1167                 struct device_attribute *attr,
1168                 const char *buf,
1169                 size_t count)
1170 {
1171         return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1172 }
1173
1174 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1175                 struct device_attribute *attr,
1176                 char *buf)
1177 {
1178         return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1179 }
1180
1181 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1182                 struct device_attribute *attr,
1183                 const char *buf,
1184                 size_t count)
1185 {
1186         return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1187 }
1188
1189 static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1190                 struct device_attribute *attr,
1191                 char *buf)
1192 {
1193         return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1194 }
1195
1196 static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1197                 struct device_attribute *attr,
1198                 const char *buf,
1199                 size_t count)
1200 {
1201         return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1202 }
1203
1204 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1205                 struct device_attribute *attr,
1206                 char *buf)
1207 {
1208         return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1209 }
1210
1211 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1212                 struct device_attribute *attr,
1213                 const char *buf,
1214                 size_t count)
1215 {
1216         return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1217 }
1218
1219 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1220                 struct device_attribute *attr,
1221                 char *buf)
1222 {
1223         return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1224 }
1225
1226 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1227                 struct device_attribute *attr,
1228                 const char *buf,
1229                 size_t count)
1230 {
1231         return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1232 }
1233
1234 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1235                 struct device_attribute *attr,
1236                 char *buf)
1237 {
1238         struct drm_device *ddev = dev_get_drvdata(dev);
1239         struct amdgpu_device *adev = drm_to_adev(ddev);
1240         uint32_t value = 0;
1241         int ret;
1242
1243         if (amdgpu_in_reset(adev))
1244                 return -EPERM;
1245         if (adev->in_suspend && !adev->in_runpm)
1246                 return -EPERM;
1247
1248         ret = pm_runtime_get_if_active(ddev->dev);
1249         if (ret <= 0)
1250                 return ret ?: -EPERM;
1251
1252         value = amdgpu_dpm_get_sclk_od(adev);
1253
1254         pm_runtime_put_autosuspend(ddev->dev);
1255
1256         return sysfs_emit(buf, "%d\n", value);
1257 }
1258
1259 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1260                 struct device_attribute *attr,
1261                 const char *buf,
1262                 size_t count)
1263 {
1264         struct drm_device *ddev = dev_get_drvdata(dev);
1265         struct amdgpu_device *adev = drm_to_adev(ddev);
1266         int ret;
1267         long int value;
1268
1269         if (amdgpu_in_reset(adev))
1270                 return -EPERM;
1271         if (adev->in_suspend && !adev->in_runpm)
1272                 return -EPERM;
1273
1274         ret = kstrtol(buf, 0, &value);
1275
1276         if (ret)
1277                 return -EINVAL;
1278
1279         ret = pm_runtime_resume_and_get(ddev->dev);
1280         if (ret < 0)
1281                 return ret;
1282
1283         amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1284
1285         pm_runtime_mark_last_busy(ddev->dev);
1286         pm_runtime_put_autosuspend(ddev->dev);
1287
1288         return count;
1289 }
1290
1291 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1292                 struct device_attribute *attr,
1293                 char *buf)
1294 {
1295         struct drm_device *ddev = dev_get_drvdata(dev);
1296         struct amdgpu_device *adev = drm_to_adev(ddev);
1297         uint32_t value = 0;
1298         int ret;
1299
1300         if (amdgpu_in_reset(adev))
1301                 return -EPERM;
1302         if (adev->in_suspend && !adev->in_runpm)
1303                 return -EPERM;
1304
1305         ret = pm_runtime_get_if_active(ddev->dev);
1306         if (ret <= 0)
1307                 return ret ?: -EPERM;
1308
1309         value = amdgpu_dpm_get_mclk_od(adev);
1310
1311         pm_runtime_put_autosuspend(ddev->dev);
1312
1313         return sysfs_emit(buf, "%d\n", value);
1314 }
1315
1316 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1317                 struct device_attribute *attr,
1318                 const char *buf,
1319                 size_t count)
1320 {
1321         struct drm_device *ddev = dev_get_drvdata(dev);
1322         struct amdgpu_device *adev = drm_to_adev(ddev);
1323         int ret;
1324         long int value;
1325
1326         if (amdgpu_in_reset(adev))
1327                 return -EPERM;
1328         if (adev->in_suspend && !adev->in_runpm)
1329                 return -EPERM;
1330
1331         ret = kstrtol(buf, 0, &value);
1332
1333         if (ret)
1334                 return -EINVAL;
1335
1336         ret = pm_runtime_resume_and_get(ddev->dev);
1337         if (ret < 0)
1338                 return ret;
1339
1340         amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1341
1342         pm_runtime_mark_last_busy(ddev->dev);
1343         pm_runtime_put_autosuspend(ddev->dev);
1344
1345         return count;
1346 }
1347
1348 /**
1349  * DOC: pp_power_profile_mode
1350  *
1351  * The amdgpu driver provides a sysfs API for adjusting the heuristics
1352  * related to switching between power levels in a power state.  The file
1353  * pp_power_profile_mode is used for this.
1354  *
1355  * Reading this file outputs a list of all of the predefined power profiles
1356  * and the relevant heuristics settings for that profile.
1357  *
1358  * To select a profile or create a custom profile, first select manual using
1359  * power_dpm_force_performance_level.  Writing the number of a predefined
1360  * profile to pp_power_profile_mode will enable those heuristics.  To
1361  * create a custom set of heuristics, write a string of numbers to the file
1362  * starting with the number of the custom profile along with a setting
1363  * for each heuristic parameter.  Due to differences across asic families
1364  * the heuristic parameters vary from family to family. Additionally,
1365  * you can apply the custom heuristics to different clock domains.  Each
1366  * clock domain is considered a distinct operation so if you modify the
1367  * gfxclk heuristics and then the memclk heuristics, all of the
1368  * custom heuristics will be retained until you switch to another profile.
1369  *
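 * Example usage (illustrative; profile numbers and the count/meaning of
 * the custom heuristic parameters differ between ASIC families, so
 * consult the list read back from this file first):
 *
 * .. code-block:: bash
 *
 *      cd /sys/class/drm/card0/device
 *      echo manual > power_dpm_force_performance_level
 *      cat pp_power_profile_mode         # list the predefined profiles
 *      echo "1" > pp_power_profile_mode  # select predefined profile number 1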
1370  */
1371
1372 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1373                 struct device_attribute *attr,
1374                 char *buf)
1375 {
1376         struct drm_device *ddev = dev_get_drvdata(dev);
1377         struct amdgpu_device *adev = drm_to_adev(ddev);
1378         ssize_t size;
1379         int ret;
1380
1381         if (amdgpu_in_reset(adev))
1382                 return -EPERM;
1383         if (adev->in_suspend && !adev->in_runpm)
1384                 return -EPERM;
1385
1386         ret = pm_runtime_get_if_active(ddev->dev);
1387         if (ret <= 0)
1388                 return ret ?: -EPERM;
1389
1390         size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1391         if (size <= 0)
1392                 size = sysfs_emit(buf, "\n");
1393
1394         pm_runtime_put_autosuspend(ddev->dev);
1395
1396         return size;
1397 }
1398
1399
1400 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1401                 struct device_attribute *attr,
1402                 const char *buf,
1403                 size_t count)
1404 {
1405         int ret;
1406         struct drm_device *ddev = dev_get_drvdata(dev);
1407         struct amdgpu_device *adev = drm_to_adev(ddev);
1408         uint32_t parameter_size = 0;
1409         long parameter[64];
1410         char *sub_str, buf_cpy[128];
1411         char *tmp_str;
1412         uint32_t i = 0;
1413         char tmp[2];
1414         long int profile_mode = 0;
1415         const char delimiter[3] = {' ', '\n', '\0'};
1416
1417         if (amdgpu_in_reset(adev))
1418                 return -EPERM;
1419         if (adev->in_suspend && !adev->in_runpm)
1420                 return -EPERM;
1421
1422         tmp[0] = *(buf);
1423         tmp[1] = '\0';
1424         ret = kstrtol(tmp, 0, &profile_mode);
1425         if (ret)
1426                 return -EINVAL;
1427
1428         if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1429                 if (count < 2 || count > 127)
1430                         return -EINVAL;
1431                 while (isspace(*++buf))
1432                         i++;
1433                 memcpy(buf_cpy, buf, count-i);
1434                 tmp_str = buf_cpy;
1435                 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1436                         if (strlen(sub_str) == 0)
1437                                 continue;
1438                         ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1439                         if (ret)
1440                                 return -EINVAL;
1441                         parameter_size++;
1442                         while (isspace(*tmp_str))
1443                                 tmp_str++;
1444                 }
1445         }
1446         parameter[parameter_size] = profile_mode;
1447
1448         ret = pm_runtime_resume_and_get(ddev->dev);
1449         if (ret < 0)
1450                 return ret;
1451
1452         ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1453
1454         pm_runtime_mark_last_busy(ddev->dev);
1455         pm_runtime_put_autosuspend(ddev->dev);
1456
1457         if (!ret)
1458                 return count;
1459
1460         return -EINVAL;
1461 }
1462
1463 static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
1464                                            enum amd_pp_sensors sensor,
1465                                            void *query)
1466 {
1467         int r, size = sizeof(uint32_t);
1468
1469         if (amdgpu_in_reset(adev))
1470                 return -EPERM;
1471         if (adev->in_suspend && !adev->in_runpm)
1472                 return -EPERM;
1473
1474         r = pm_runtime_get_if_active(adev->dev);
1475         if (r <= 0)
1476                 return r ?: -EPERM;
1477
1478         /* get the sensor value */
1479         r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1480
1481         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1482
1483         return r;
1484 }
1485
1486 /**
1487  * DOC: gpu_busy_percent
1488  *
1489  * The amdgpu driver provides a sysfs API for reading how busy the GPU
1490  * is as a percentage.  The file gpu_busy_percent is used for this.
1491  * The SMU firmware computes a percentage of load based on the
1492  * aggregate activity level in the IP cores.
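 *
 * Example usage (illustrative; card0 depends on the system):
 *
 * .. code-block:: bash
 *
 *      # sample the GPU load once per second
 *      watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent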
1493  */
1494 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1495                                            struct device_attribute *attr,
1496                                            char *buf)
1497 {
1498         struct drm_device *ddev = dev_get_drvdata(dev);
1499         struct amdgpu_device *adev = drm_to_adev(ddev);
1500         unsigned int value;
1501         int r;
1502
1503         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1504         if (r)
1505                 return r;
1506
1507         return sysfs_emit(buf, "%d\n", value);
1508 }
1509
1510 /**
1511  * DOC: mem_busy_percent
1512  *
1513  * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1514  * is as a percentage.  The file mem_busy_percent is used for this.
1515  * The SMU firmware computes a percentage of load based on the
1516  * aggregate activity level in the IP cores.
1517  */
1518 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1519                                            struct device_attribute *attr,
1520                                            char *buf)
1521 {
1522         struct drm_device *ddev = dev_get_drvdata(dev);
1523         struct amdgpu_device *adev = drm_to_adev(ddev);
1524         unsigned int value;
1525         int r;
1526
1527         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1528         if (r)
1529                 return r;
1530
1531         return sysfs_emit(buf, "%d\n", value);
1532 }
1533
1534 /**
1535  * DOC: vcn_busy_percent
1536  *
1537  * The amdgpu driver provides a sysfs API for reading how busy the VCN
1538  * is as a percentage.  The file vcn_busy_percent is used for this.
1539  * The SMU firmware computes a percentage of load based on the
1540  * aggregate activity level in the IP cores.
1541  */
1542 static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
1543                                                   struct device_attribute *attr,
1544                                                   char *buf)
1545 {
1546         struct drm_device *ddev = dev_get_drvdata(dev);
1547         struct amdgpu_device *adev = drm_to_adev(ddev);
1548         unsigned int value;
1549         int r;
1550
1551         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
1552         if (r)
1553                 return r;
1554
1555         return sysfs_emit(buf, "%d\n", value);
1556 }
1557
1558 /**
1559  * DOC: pcie_bw
1560  *
1561  * The amdgpu driver provides a sysfs API for estimating how much data
1562  * has been received and sent by the GPU in the last second through PCIe.
1563  * The file pcie_bw is used for this.
1564  * The Perf counters count the number of received and sent messages and return
1565  * those values, as well as the maximum payload size of a PCIe packet (mps).
1566  * Note that it is not possible to easily and quickly obtain the size of each
1567  * packet transmitted, so we output the max payload size (mps) to allow for
1568  * quick estimation of the PCIe bandwidth usage.
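 *
 * Since each counted message carries at most mps bytes, a rough upper bound
 * on the bandwidth used in the last second is (received + sent) * mps.
 * For example (illustrative counter values):
 *
 * .. code-block:: console
 *
 *      cat /sys/bus/pci/devices/.../pcie_bw
 *      4096 2048 256
 *      # upper bound: (4096 + 2048) * 256 = 1572864 bytes in the last second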
1569  */
1570 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1571                 struct device_attribute *attr,
1572                 char *buf)
1573 {
1574         struct drm_device *ddev = dev_get_drvdata(dev);
1575         struct amdgpu_device *adev = drm_to_adev(ddev);
1576         uint64_t count0 = 0, count1 = 0;
1577         int ret;
1578
1579         if (amdgpu_in_reset(adev))
1580                 return -EPERM;
1581         if (adev->in_suspend && !adev->in_runpm)
1582                 return -EPERM;
1583
1584         if (adev->flags & AMD_IS_APU)
1585                 return -ENODATA;
1586
1587         if (!adev->asic_funcs->get_pcie_usage)
1588                 return -ENODATA;
1589
1590         ret = pm_runtime_get_if_active(ddev->dev);
1591         if (ret <= 0)
1592                 return ret ?: -EPERM;
1593
1594         amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1595
1596         pm_runtime_put_autosuspend(ddev->dev);
1597
1598         return sysfs_emit(buf, "%llu %llu %i\n",
1599                           count0, count1, pcie_get_mps(adev->pdev));
1600 }
1601
1602 /**
1603  * DOC: unique_id
1604  *
1605  * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1606  * The file unique_id is used for this.
1607  * This provides a unique ID that persists from machine to machine.
1608  *
1609  * NOTE: This will only work for GFX9 and newer. This file will be absent
1610  * on unsupported ASICs (GFX8 and older).
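 *
 * Example read (the 64-bit ID shown is illustrative):
 *
 * .. code-block:: console
 *
 *      cat /sys/bus/pci/devices/.../unique_id
 *      3d7172d2093c1d11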
1611  */
1612 static ssize_t amdgpu_get_unique_id(struct device *dev,
1613                 struct device_attribute *attr,
1614                 char *buf)
1615 {
1616         struct drm_device *ddev = dev_get_drvdata(dev);
1617         struct amdgpu_device *adev = drm_to_adev(ddev);
1618
1619         if (amdgpu_in_reset(adev))
1620                 return -EPERM;
1621         if (adev->in_suspend && !adev->in_runpm)
1622                 return -EPERM;
1623
1624         if (adev->unique_id)
1625                 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1626
1627         return 0;
1628 }
1629
1630 /**
1631  * DOC: thermal_throttling_logging
1632  *
1633  * Thermal throttling pulls down the clock frequency and thus the performance.
1634  * It's a useful mechanism to protect the chip from overheating. Since it
1635  * impacts performance, the user controls whether it is enabled and if so,
1636  * the log frequency.
1637  *
1638  * Reading back the file shows you the status (enabled or disabled) and
1639  * the interval (in seconds) between each thermal logging.
1640  *
1641  * Writing an integer to the file sets a new logging interval, in seconds.
1642  * The value should be between 1 and 3600. If the value is less than 1,
1643  * thermal logging is disabled. Values greater than 3600 are ignored.
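 *
 * For example, to log throttling events at most once every 60 seconds, or
 * to disable the logging entirely:
 *
 * .. code-block:: console
 *
 *      echo "60" > /sys/bus/pci/devices/.../thermal_throttling_logging
 *      echo "0" > /sys/bus/pci/devices/.../thermal_throttling_logging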
1644  */
1645 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1646                                                      struct device_attribute *attr,
1647                                                      char *buf)
1648 {
1649         struct drm_device *ddev = dev_get_drvdata(dev);
1650         struct amdgpu_device *adev = drm_to_adev(ddev);
1651
1652         return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1653                           adev_to_drm(adev)->unique,
1654                           atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1655                           adev->throttling_logging_rs.interval / HZ + 1);
1656 }
1657
1658 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1659                                                      struct device_attribute *attr,
1660                                                      const char *buf,
1661                                                      size_t count)
1662 {
1663         struct drm_device *ddev = dev_get_drvdata(dev);
1664         struct amdgpu_device *adev = drm_to_adev(ddev);
1665         long throttling_logging_interval;
1666         unsigned long flags;
1667         int ret = 0;
1668
1669         ret = kstrtol(buf, 0, &throttling_logging_interval);
1670         if (ret)
1671                 return ret;
1672
1673         if (throttling_logging_interval > 3600)
1674                 return -EINVAL;
1675
1676         if (throttling_logging_interval > 0) {
1677                 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1678                 /*
1679                  * Reset the ratelimit timer internals.
1680                  * This can effectively restart the timer.
1681                  */
1682                 adev->throttling_logging_rs.interval =
1683                         (throttling_logging_interval - 1) * HZ;
1684                 adev->throttling_logging_rs.begin = 0;
1685                 adev->throttling_logging_rs.printed = 0;
1686                 adev->throttling_logging_rs.missed = 0;
1687                 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1688
1689                 atomic_set(&adev->throttling_logging_enabled, 1);
1690         } else {
1691                 atomic_set(&adev->throttling_logging_enabled, 0);
1692         }
1693
1694         return count;
1695 }
1696
1697 /**
1698  * DOC: apu_thermal_cap
1699  *
1700  * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1701  * limit temperature in millidegrees Celsius.
1702  *
1703  * Reading back the file shows you the core limit value.
1704  *
1705  * Writing an integer to the file sets a new thermal limit. The value
1706  * should be between 0 and 100. If the value is less than 0 or greater
1707  * than 100, then the write request will be ignored.
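 *
 * For example, to read the current limit and then lower it (values are
 * illustrative):
 *
 * .. code-block:: console
 *
 *      cat /sys/bus/pci/devices/.../apu_thermal_cap
 *      95
 *      echo "90" > /sys/bus/pci/devices/.../apu_thermal_cap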
1708  */
1709 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1710                                          struct device_attribute *attr,
1711                                          char *buf)
1712 {
1713         int ret, size;
1714         u32 limit;
1715         struct drm_device *ddev = dev_get_drvdata(dev);
1716         struct amdgpu_device *adev = drm_to_adev(ddev);
1717
1718         ret = pm_runtime_get_if_active(ddev->dev);
1719         if (ret <= 0)
1720                 return ret ?: -EPERM;
1721
1722         ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1723         if (!ret)
1724                 size = sysfs_emit(buf, "%u\n", limit);
1725         else
1726                 size = sysfs_emit(buf, "failed to get thermal limit\n");
1727
1728         pm_runtime_put_autosuspend(ddev->dev);
1729
1730         return size;
1731 }
1732
1733 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1734                                          struct device_attribute *attr,
1735                                          const char *buf,
1736                                          size_t count)
1737 {
1738         int ret;
1739         u32 value;
1740         struct drm_device *ddev = dev_get_drvdata(dev);
1741         struct amdgpu_device *adev = drm_to_adev(ddev);
1742
1743         ret = kstrtou32(buf, 10, &value);
1744         if (ret)
1745                 return ret;
1746
1747         if (value > 100) {
1748                 dev_err(dev, "Invalid argument!\n");
1749                 return -EINVAL;
1750         }
1751
1752         ret = pm_runtime_resume_and_get(ddev->dev);
1753         if (ret < 0)
1754                 return ret;
1755
1756         ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1757         if (ret) {
1758                 pm_runtime_mark_last_busy(ddev->dev);
1759                 pm_runtime_put_autosuspend(ddev->dev);
1760                 dev_err(dev, "failed to update thermal limit\n");
1761                 return ret;
1762         }
1763
1764         pm_runtime_mark_last_busy(ddev->dev);
1765         pm_runtime_put_autosuspend(ddev->dev);
1766
1767         return count;
1768 }
1769
1770 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
1771                                          struct amdgpu_device_attr *attr,
1772                                          uint32_t mask,
1773                                          enum amdgpu_device_attr_states *states)
1774 {
1775         if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
1776                 *states = ATTR_STATE_UNSUPPORTED;
1777
1778         return 0;
1779 }
1780
1781 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1782                                      struct device_attribute *attr, char *buf)
1783 {
1784         struct drm_device *ddev = dev_get_drvdata(dev);
1785         struct amdgpu_device *adev = drm_to_adev(ddev);
1786         ssize_t size = 0;
1787         int ret;
1788
1789         if (amdgpu_in_reset(adev))
1790                 return -EPERM;
1791         if (adev->in_suspend && !adev->in_runpm)
1792                 return -EPERM;
1793
1794         ret = pm_runtime_get_if_active(ddev->dev);
1795         if (ret <= 0)
1796                 return ret ?: -EPERM;
1797
1798         size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1799
1800         pm_runtime_put_autosuspend(ddev->dev);
1801
1802         return size;
1803 }
1804
1805 /**
1806  * DOC: gpu_metrics
1807  *
1808  * The amdgpu driver provides a sysfs API for retrieving current gpu
1809  * metrics data. The file gpu_metrics is used for this. Reading the
1810  * file will dump all the current gpu metrics data.
1811  *
1812  * The data include temperature, frequencies, engine utilization,
1813  * power consumption, throttler status, fan speed, and CPU core statistics
1814  * (available for APUs only). That is, it gives a snapshot of all sensors
1815  * at the same time.
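 *
 * The file holds a binary, versioned metrics table rather than text, so it
 * is normally consumed by tools; a quick sanity check is a short hex dump:
 *
 * .. code-block:: console
 *
 *      xxd -l 16 /sys/bus/pci/devices/.../gpu_metrics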
1816  */
1817 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1818                                       struct device_attribute *attr,
1819                                       char *buf)
1820 {
1821         struct drm_device *ddev = dev_get_drvdata(dev);
1822         struct amdgpu_device *adev = drm_to_adev(ddev);
1823         void *gpu_metrics;
1824         ssize_t size = 0;
1825         int ret;
1826
1827         if (amdgpu_in_reset(adev))
1828                 return -EPERM;
1829         if (adev->in_suspend && !adev->in_runpm)
1830                 return -EPERM;
1831
1832         ret = pm_runtime_get_if_active(ddev->dev);
1833         if (ret <= 0)
1834                 return ret ?: -EPERM;
1835
1836         size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1837         if (size <= 0)
1838                 goto out;
1839
1840         if (size >= PAGE_SIZE)
1841                 size = PAGE_SIZE - 1;
1842
1843         memcpy(buf, gpu_metrics, size);
1844
1845 out:
1846         pm_runtime_put_autosuspend(ddev->dev);
1847
1848         return size;
1849 }
1850
1851 static int amdgpu_show_powershift_percent(struct device *dev,
1852                                         char *buf, enum amd_pp_sensors sensor)
1853 {
1854         struct drm_device *ddev = dev_get_drvdata(dev);
1855         struct amdgpu_device *adev = drm_to_adev(ddev);
1856         uint32_t ss_power;
1857         int r = 0, i;
1858
1859         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1860         if (r == -EOPNOTSUPP) {
1861                 /* sensor not available on dGPU, try to read from APU */
1862                 adev = NULL;
1863                 mutex_lock(&mgpu_info.mutex);
1864                 for (i = 0; i < mgpu_info.num_gpu; i++) {
1865                         if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1866                                 adev = mgpu_info.gpu_ins[i].adev;
1867                                 break;
1868                         }
1869                 }
1870                 mutex_unlock(&mgpu_info.mutex);
1871                 if (adev)
1872                         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1873         }
1874
1875         if (r)
1876                 return r;
1877
1878         return sysfs_emit(buf, "%u%%\n", ss_power);
1879 }
1880
1881 /**
1882  * DOC: smartshift_apu_power
1883  *
1884  * The amdgpu driver provides a sysfs API for reporting the APU power
1885  * shift as a percentage if the platform supports smartshift. A value of 0
1886  * means there is no power shift, and a value in [1-100] means the power
1887  * is shifted to the APU; the percentage of boost is with respect to the
1888  * APU power limit on the platform.
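 *
 * Example read (illustrative value):
 *
 * .. code-block:: console
 *
 *      cat /sys/bus/pci/devices/.../smartshift_apu_power
 *      20%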
1889  */
1890
1891 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1892                                                char *buf)
1893 {
1894         return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1895 }
1896
1897 /**
1898  * DOC: smartshift_dgpu_power
1899  *
1900  * The amdgpu driver provides a sysfs API for reporting the dGPU power
1901  * shift as a percentage if the platform supports smartshift. A value of 0
1902  * means there is no power shift, and a value in [1-100] means the power
1903  * is shifted to the dGPU; the percentage of boost is with respect to the
1904  * dGPU power limit on the platform.
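 *
 * Reading both smartshift files shows which side, if any, the power is
 * currently shifted to (illustrative values):
 *
 * .. code-block:: console
 *
 *      cat /sys/bus/pci/devices/.../smartshift_apu_power
 *      0%
 *      cat /sys/bus/pci/devices/.../smartshift_dgpu_power
 *      20%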
1905  */
1906
1907 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1908                                                 char *buf)
1909 {
1910         return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1911 }
1912
1913 /**
1914  * DOC: smartshift_bias
1915  *
1916  * The amdgpu driver provides a sysfs API for reporting the
1917  * smartshift (SS2.0) bias level. The value ranges from -100 to 100,
1918  * and the default is 0. -100 sets maximum preference to the APU
1919  * and 100 sets maximum preference to the dGPU.
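 *
 * For example, to bias power fully toward the dGPU and read the value back:
 *
 * .. code-block:: console
 *
 *      echo "100" > /sys/bus/pci/devices/.../smartshift_bias
 *      cat /sys/bus/pci/devices/.../smartshift_bias
 *      100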
1920  */
1921
1922 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1923                                           struct device_attribute *attr,
1924                                           char *buf)
1925 {
1926         int r = 0;
1927
1928         r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1929
1930         return r;
1931 }
1932
1933 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1934                                           struct device_attribute *attr,
1935                                           const char *buf, size_t count)
1936 {
1937         struct drm_device *ddev = dev_get_drvdata(dev);
1938         struct amdgpu_device *adev = drm_to_adev(ddev);
1939         int r = 0;
1940         int bias = 0;
1941
1942         if (amdgpu_in_reset(adev))
1943                 return -EPERM;
1944         if (adev->in_suspend && !adev->in_runpm)
1945                 return -EPERM;
1946
1947         r = pm_runtime_resume_and_get(ddev->dev);
1948         if (r < 0)
1949                 return r;
1950
1951         r = kstrtoint(buf, 10, &bias);
1952         if (r)
1953                 goto out;
1954
1955         if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1956                 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1957         else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1958                 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1959
1960         amdgpu_smartshift_bias = bias;
1961         r = count;
1962
1963         /* TODO: update bias level with SMU message */
1964
1965 out:
1966         pm_runtime_mark_last_busy(ddev->dev);
1967         pm_runtime_put_autosuspend(ddev->dev);
1968         return r;
1969 }
1970
1971 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1972                                 uint32_t mask, enum amdgpu_device_attr_states *states)
1973 {
1974         if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1975                 *states = ATTR_STATE_UNSUPPORTED;
1976
1977         return 0;
1978 }
1979
1980 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1981                                uint32_t mask, enum amdgpu_device_attr_states *states)
1982 {
1983         uint32_t ss_power;
1984
1985         if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1986                 *states = ATTR_STATE_UNSUPPORTED;
1987         else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1988                  (void *)&ss_power))
1989                 *states = ATTR_STATE_UNSUPPORTED;
1990         else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1991                  (void *)&ss_power))
1992                 *states = ATTR_STATE_UNSUPPORTED;
1993
1994         return 0;
1995 }
1996
1997 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1998                                          uint32_t mask, enum amdgpu_device_attr_states *states)
1999 {
2000         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2001
2002         *states = ATTR_STATE_SUPPORTED;
2003
2004         if (!amdgpu_dpm_is_overdrive_supported(adev)) {
2005                 *states = ATTR_STATE_UNSUPPORTED;
2006                 return 0;
2007         }
2008
2009         /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
2010         if (gc_ver == IP_VERSION(9, 4, 3) ||
2011             gc_ver == IP_VERSION(9, 4, 4)) {
2012                 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2013                         *states = ATTR_STATE_UNSUPPORTED;
2014                 return 0;
2015         }
2016
2017         if (!(attr->flags & mask))
2018                 *states = ATTR_STATE_UNSUPPORTED;
2019
2020         return 0;
2021 }
2022
2023 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2024                                       uint32_t mask, enum amdgpu_device_attr_states *states)
2025 {
2026         struct device_attribute *dev_attr = &attr->dev_attr;
2027         uint32_t gc_ver;
2028
2029         *states = ATTR_STATE_SUPPORTED;
2030
2031         if (!(attr->flags & mask)) {
2032                 *states = ATTR_STATE_UNSUPPORTED;
2033                 return 0;
2034         }
2035
2036         gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2037         /* dcefclk node is not available on gfx 11.0.3 sriov */
2038         if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
2039             gc_ver < IP_VERSION(9, 0, 0) ||
2040             !amdgpu_device_has_display_hardware(adev))
2041                 *states = ATTR_STATE_UNSUPPORTED;
2042
2043         /* SMU MP1 does not support dcefclk level setting; also,
2044          * setting should not be allowed from a VF if not in one-VF mode.
2045          */
2046         if (gc_ver >= IP_VERSION(10, 0, 0) ||
2047             (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
2048                 dev_attr->attr.mode &= ~S_IWUGO;
2049                 dev_attr->store = NULL;
2050         }
2051
2052         return 0;
2053 }
2054
2055 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2056                                           uint32_t mask, enum amdgpu_device_attr_states *states)
2057 {
2058         struct device_attribute *dev_attr = &attr->dev_attr;
2059         enum amdgpu_device_attr_id attr_id = attr->attr_id;
2060         uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
2061         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2062
2063         *states = ATTR_STATE_SUPPORTED;
2064
2065         if (!(attr->flags & mask)) {
2066                 *states = ATTR_STATE_UNSUPPORTED;
2067                 return 0;
2068         }
2069
2070         if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2071                 if (gc_ver < IP_VERSION(9, 0, 0))
2072                         *states = ATTR_STATE_UNSUPPORTED;
2073         } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2074                 if (mp1_ver < IP_VERSION(10, 0, 0))
2075                         *states = ATTR_STATE_UNSUPPORTED;
2076         } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2077                 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2078                       gc_ver == IP_VERSION(10, 3, 3) ||
2079                       gc_ver == IP_VERSION(10, 3, 6) ||
2080                       gc_ver == IP_VERSION(10, 3, 7) ||
2081                       gc_ver == IP_VERSION(10, 3, 0) ||
2082                       gc_ver == IP_VERSION(10, 1, 2) ||
2083                       gc_ver == IP_VERSION(11, 0, 0) ||
2084                       gc_ver == IP_VERSION(11, 0, 1) ||
2085                       gc_ver == IP_VERSION(11, 0, 4) ||
2086                       gc_ver == IP_VERSION(11, 5, 0) ||
2087                       gc_ver == IP_VERSION(11, 0, 2) ||
2088                       gc_ver == IP_VERSION(11, 0, 3) ||
2089                       gc_ver == IP_VERSION(9, 4, 3) ||
2090                       gc_ver == IP_VERSION(9, 4, 4)))
2091                         *states = ATTR_STATE_UNSUPPORTED;
2092         } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2093                 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2094                        gc_ver == IP_VERSION(10, 3, 0) ||
2095                        gc_ver == IP_VERSION(11, 0, 2) ||
2096                        gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2097                         *states = ATTR_STATE_UNSUPPORTED;
2098         } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2099                 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2100                       gc_ver == IP_VERSION(10, 3, 3) ||
2101                       gc_ver == IP_VERSION(10, 3, 6) ||
2102                       gc_ver == IP_VERSION(10, 3, 7) ||
2103                       gc_ver == IP_VERSION(10, 3, 0) ||
2104                       gc_ver == IP_VERSION(10, 1, 2) ||
2105                       gc_ver == IP_VERSION(11, 0, 0) ||
2106                       gc_ver == IP_VERSION(11, 0, 1) ||
2107                       gc_ver == IP_VERSION(11, 0, 4) ||
2108                       gc_ver == IP_VERSION(11, 5, 0) ||
2109                       gc_ver == IP_VERSION(11, 0, 2) ||
2110                       gc_ver == IP_VERSION(11, 0, 3) ||
2111                       gc_ver == IP_VERSION(9, 4, 3) ||
2112                       gc_ver == IP_VERSION(9, 4, 4)))
2113                         *states = ATTR_STATE_UNSUPPORTED;
2114         } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2115                 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2116                        gc_ver == IP_VERSION(10, 3, 0) ||
2117                        gc_ver == IP_VERSION(11, 0, 2) ||
2118                        gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2119                         *states = ATTR_STATE_UNSUPPORTED;
2120         } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2121                 if (gc_ver == IP_VERSION(9, 4, 2) ||
2122                     gc_ver == IP_VERSION(9, 4, 3) ||
2123                     gc_ver == IP_VERSION(9, 4, 4))
2124                         *states = ATTR_STATE_UNSUPPORTED;
2125         }
2126
2127         switch (gc_ver) {
2128         case IP_VERSION(9, 4, 1):
2129         case IP_VERSION(9, 4, 2):
2130                 /* the MI series cards do not support standalone mclk/socclk/fclk level setting */
2131                 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2132                     DEVICE_ATTR_IS(pp_dpm_socclk) ||
2133                     DEVICE_ATTR_IS(pp_dpm_fclk)) {
2134                         dev_attr->attr.mode &= ~S_IWUGO;
2135                         dev_attr->store = NULL;
2136                 }
2137                 break;
2138         default:
2139                 break;
2140         }
2141
2142         /* setting should not be allowed from VF if not in one VF mode */
2143         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2144                 dev_attr->attr.mode &= ~S_IWUGO;
2145                 dev_attr->store = NULL;
2146         }
2147
2148         return 0;
2149 }
2150
2151 /* pm policy attributes */
2152 struct amdgpu_pm_policy_attr {
2153         struct device_attribute dev_attr;
2154         enum pp_pm_policy id;
2155 };
2156
2157 /**
2158  * DOC: pm_policy
2159  *
2160  * Certain SOCs can support different power policies to optimize application
2161  * performance. However, this policy is provided only at the SOC level and
2162  * not at a per-process level. This is especially useful when the entire SOC
2163  * is utilized for a dedicated workload.
2164  *
2165  * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2166  * only two types of policies are supported through this interface.
2167  *
2168  *  Pstate Policy Selection - This is to select different Pstate profiles which
2169  *  decides clock/throttling preferences.
2170  *
2171  *  XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2172  *  this helps to select the policy to be applied for per-link power down.
2173  *
2174  * The available policies and policy levels vary between SOCs. They can be
2175  * viewed under the pm_policy node directory. If the SOC doesn't support any
2176  * policy, this node won't be available. The different policies supported will be
2177  * available as separate nodes under pm_policy.
2178  *
2179  *      cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2180  *
2181  * Reading the policy file shows the different levels supported. The level which
2182  * is applied presently is denoted by * (asterisk). E.g.,
2183  *
2184  * .. code-block:: console
2185  *
2186  *      cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2187  *      0 : soc_pstate_default
2188  *      1 : soc_pstate_0
2189  *      2 : soc_pstate_1*
2190  *      3 : soc_pstate_2
2191  *
2192  *      cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2193  *      0 : plpd_disallow
2194  *      1 : plpd_default
2195  *      2 : plpd_optimized*
2196  *
2197  * To apply a specific policy
2198  *
2199  * "echo  <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2200  *
2201  * For the levels listed in the example above, to select "plpd_optimized" for
2202  * XGMI and "soc_pstate_2" for soc pstate policy -
2203  *
2204  * .. code-block:: console
2205  *
2206  *      echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2207  *      echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2208  *
2209  */
2210 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2211                                          struct device_attribute *attr,
2212                                          char *buf)
2213 {
2214         struct drm_device *ddev = dev_get_drvdata(dev);
2215         struct amdgpu_device *adev = drm_to_adev(ddev);
2216         struct amdgpu_pm_policy_attr *policy_attr;
2217
2218         policy_attr =
2219                 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2220
2221         if (amdgpu_in_reset(adev))
2222                 return -EPERM;
2223         if (adev->in_suspend && !adev->in_runpm)
2224                 return -EPERM;
2225
2226         return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2227 }
2228
2229 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2230                                          struct device_attribute *attr,
2231                                          const char *buf, size_t count)
2232 {
2233         struct drm_device *ddev = dev_get_drvdata(dev);
2234         struct amdgpu_device *adev = drm_to_adev(ddev);
2235         struct amdgpu_pm_policy_attr *policy_attr;
2236         int ret, num_params = 0;
2237         char delimiter[] = " \n\t";
2238         char tmp_buf[128];
2239         char *tmp, *param;
2240         long val;
2241
2242         if (amdgpu_in_reset(adev))
2243                 return -EPERM;
2244         if (adev->in_suspend && !adev->in_runpm)
2245                 return -EPERM;
2246
2247         count = min(count, sizeof(tmp_buf));
2248         memcpy(tmp_buf, buf, count);
2249         tmp_buf[count - 1] = '\0';
2250         tmp = tmp_buf;
2251
2252         tmp = skip_spaces(tmp);
2253         while ((param = strsep(&tmp, delimiter))) {
2254                 if (!strlen(param)) {
2255                         tmp = skip_spaces(tmp);
2256                         continue;
2257                 }
2258                 ret = kstrtol(param, 0, &val);
2259                 if (ret)
2260                         return -EINVAL;
2261                 num_params++;
2262                 if (num_params > 1)
2263                         return -EINVAL;
2264         }
2265
2266         if (num_params != 1)
2267                 return -EINVAL;
2268
2269         policy_attr =
2270                 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2271
2272         ret = pm_runtime_resume_and_get(ddev->dev);
2273         if (ret < 0)
2274                 return ret;
2275
2276         ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2277
2278         pm_runtime_mark_last_busy(ddev->dev);
2279         pm_runtime_put_autosuspend(ddev->dev);
2280
2281         if (ret)
2282                 return ret;
2283
2284         return count;
2285 }
2286
2287 #define AMDGPU_PM_POLICY_ATTR(_name, _id)                                  \
2288         static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {     \
2289                 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2290                                    amdgpu_set_pm_policy_attr),             \
2291                 .id = PP_PM_POLICY_##_id,                                  \
2292         };
2293
2294 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2295
2296 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2297 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2298
2299 static struct attribute *pm_policy_attrs[] = {
2300         &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2301         &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2302         NULL
2303 };
2304
2305 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2306                                              struct attribute *attr, int n)
2307 {
2308         struct device *dev = kobj_to_dev(kobj);
2309         struct drm_device *ddev = dev_get_drvdata(dev);
2310         struct amdgpu_device *adev = drm_to_adev(ddev);
2311         struct amdgpu_pm_policy_attr *policy_attr;
2312
2313         policy_attr =
2314                 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2315
2316         if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2317             -ENOENT)
2318                 return 0;
2319
2320         return attr->mode;
2321 }
2322
2323 const struct attribute_group amdgpu_pm_policy_attr_group = {
2324         .name = "pm_policy",
2325         .attrs = pm_policy_attrs,
2326         .is_visible = amdgpu_pm_policy_attr_visible,
2327 };
2328
2329 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2330         AMDGPU_DEVICE_ATTR_RW(power_dpm_state,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2331         AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2332         AMDGPU_DEVICE_ATTR_RO(pp_num_states,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2333         AMDGPU_DEVICE_ATTR_RO(pp_cur_state,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2334         AMDGPU_DEVICE_ATTR_RW(pp_force_state,                           ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2335         AMDGPU_DEVICE_ATTR_RW(pp_table,                                 ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2336         AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2337                               .attr_update = pp_dpm_clk_default_attr_update),
2338         AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2339                               .attr_update = pp_dpm_clk_default_attr_update),
2340         AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2341                               .attr_update = pp_dpm_clk_default_attr_update),
2342         AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2343                               .attr_update = pp_dpm_clk_default_attr_update),
2344         AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2345                               .attr_update = pp_dpm_clk_default_attr_update),
2346         AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2347                               .attr_update = pp_dpm_clk_default_attr_update),
2348         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2349                               .attr_update = pp_dpm_clk_default_attr_update),
2350         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2351                               .attr_update = pp_dpm_clk_default_attr_update),
2352         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2353                               .attr_update = pp_dpm_dcefclk_attr_update),
2354         AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2355                               .attr_update = pp_dpm_clk_default_attr_update),
2356         AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,                               ATTR_FLAG_BASIC),
2357         AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,                               ATTR_FLAG_BASIC),
2358         AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,                    ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2359         AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,                        ATTR_FLAG_BASIC,
2360                               .attr_update = pp_od_clk_voltage_attr_update),
2361         AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,                         ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2362         AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,                         ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2363         AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent,                         ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2364         AMDGPU_DEVICE_ATTR_RO(pcie_bw,                                  ATTR_FLAG_BASIC),
2365         AMDGPU_DEVICE_ATTR_RW(pp_features,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2366         AMDGPU_DEVICE_ATTR_RO(unique_id,                                ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2367         AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,               ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2368         AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2369         AMDGPU_DEVICE_ATTR_RO(gpu_metrics,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2370         AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,                     ATTR_FLAG_BASIC,
2371                               .attr_update = ss_power_attr_update),
2372         AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,                    ATTR_FLAG_BASIC,
2373                               .attr_update = ss_power_attr_update),
2374         AMDGPU_DEVICE_ATTR_RW(smartshift_bias,                          ATTR_FLAG_BASIC,
2375                               .attr_update = ss_bias_attr_update),
2376         AMDGPU_DEVICE_ATTR_RO(pm_metrics,                               ATTR_FLAG_BASIC,
2377                               .attr_update = amdgpu_pm_metrics_attr_update),
2378 };
2379
2380 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2381                                uint32_t mask, enum amdgpu_device_attr_states *states)
2382 {
2383         struct device_attribute *dev_attr = &attr->dev_attr;
2384         enum amdgpu_device_attr_id attr_id = attr->attr_id;
2385         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2386
2387         if (!(attr->flags & mask)) {
2388                 *states = ATTR_STATE_UNSUPPORTED;
2389                 return 0;
2390         }
2391
2392         if (DEVICE_ATTR_IS(mem_busy_percent)) {
2393                 if ((adev->flags & AMD_IS_APU &&
2394                      gc_ver != IP_VERSION(9, 4, 3)) ||
2395                     gc_ver == IP_VERSION(9, 0, 1))
2396                         *states = ATTR_STATE_UNSUPPORTED;
2397         } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2398                 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2399                           gc_ver == IP_VERSION(10, 3, 3) ||
2400                           gc_ver == IP_VERSION(10, 3, 6) ||
2401                           gc_ver == IP_VERSION(10, 3, 7) ||
2402                           gc_ver == IP_VERSION(11, 0, 1) ||
2403                           gc_ver == IP_VERSION(11, 0, 4) ||
2404                           gc_ver == IP_VERSION(11, 5, 0)))
2405                         *states = ATTR_STATE_UNSUPPORTED;
2406         } else if (DEVICE_ATTR_IS(pcie_bw)) {
2407                 /* PCIe Perf counters won't work on APU nodes */
2408                 if (adev->flags & AMD_IS_APU ||
2409                     !adev->asic_funcs->get_pcie_usage)
2410                         *states = ATTR_STATE_UNSUPPORTED;
2411         } else if (DEVICE_ATTR_IS(unique_id)) {
2412                 switch (gc_ver) {
2413                 case IP_VERSION(9, 0, 1):
2414                 case IP_VERSION(9, 4, 0):
2415                 case IP_VERSION(9, 4, 1):
2416                 case IP_VERSION(9, 4, 2):
2417                 case IP_VERSION(9, 4, 3):
2418                 case IP_VERSION(9, 4, 4):
2419                 case IP_VERSION(10, 3, 0):
2420                 case IP_VERSION(11, 0, 0):
2421                 case IP_VERSION(11, 0, 1):
2422                 case IP_VERSION(11, 0, 2):
2423                 case IP_VERSION(11, 0, 3):
2424                         *states = ATTR_STATE_SUPPORTED;
2425                         break;
2426                 default:
2427                         *states = ATTR_STATE_UNSUPPORTED;
2428                 }
2429         } else if (DEVICE_ATTR_IS(pp_features)) {
2430                 if ((adev->flags & AMD_IS_APU &&
2431                      gc_ver != IP_VERSION(9, 4, 3)) ||
2432                     gc_ver < IP_VERSION(9, 0, 0))
2433                         *states = ATTR_STATE_UNSUPPORTED;
2434         } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2435                 if (gc_ver < IP_VERSION(9, 1, 0))
2436                         *states = ATTR_STATE_UNSUPPORTED;
2437         } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2438                 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2439                         *states = ATTR_STATE_UNSUPPORTED;
2440                 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2441                           gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2442                         *states = ATTR_STATE_UNSUPPORTED;
2443         } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2444                 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2445                         *states = ATTR_STATE_UNSUPPORTED;
2446         } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2447                 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2448                         *states = ATTR_STATE_UNSUPPORTED;
2449         } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2450                 u32 limit;
2451
2452                 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2453                     -EOPNOTSUPP)
2454                         *states = ATTR_STATE_UNSUPPORTED;
2455         }
2456
2457         switch (gc_ver) {
2458         case IP_VERSION(10, 3, 0):
2459                 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2460                     amdgpu_sriov_vf(adev)) {
2461                         dev_attr->attr.mode &= ~0222;
2462                         dev_attr->store = NULL;
2463                 }
2464                 break;
2465         default:
2466                 break;
2467         }
2468
2469         return 0;
2470 }
2471
2472
2473 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2474                                      struct amdgpu_device_attr *attr,
2475                                      uint32_t mask, struct list_head *attr_list)
2476 {
2477         int ret = 0;
2478         enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2479         struct amdgpu_device_attr_entry *attr_entry;
2480         struct device_attribute *dev_attr;
2481         const char *name;
2482
2483         int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2484                            uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2485
2486         if (!attr)
2487                 return -EINVAL;
2488
2489         dev_attr = &attr->dev_attr;
2490         name = dev_attr->attr.name;
2491
2492         attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2493
2494         ret = attr_update(adev, attr, mask, &attr_states);
2495         if (ret) {
2496                 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2497                         name, ret);
2498                 return ret;
2499         }
2500
2501         if (attr_states == ATTR_STATE_UNSUPPORTED)
2502                 return 0;
2503
2504         ret = device_create_file(adev->dev, dev_attr);
2505         if (ret) {
2506                 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2507                         name, ret);
2508         }
2509
2510         attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2511         if (!attr_entry)
2512                 return -ENOMEM;
2513
2514         attr_entry->attr = attr;
2515         INIT_LIST_HEAD(&attr_entry->entry);
2516
2517         list_add_tail(&attr_entry->entry, attr_list);
2518
2519         return ret;
2520 }
2521
2522 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2523 {
2524         struct device_attribute *dev_attr = &attr->dev_attr;
2525
2526         device_remove_file(adev->dev, dev_attr);
2527 }
2528
2529 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2530                                              struct list_head *attr_list);
2531
2532 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2533                                             struct amdgpu_device_attr *attrs,
2534                                             uint32_t counts,
2535                                             uint32_t mask,
2536                                             struct list_head *attr_list)
2537 {
2538         int ret = 0;
2539         uint32_t i = 0;
2540
2541         for (i = 0; i < counts; i++) {
2542                 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2543                 if (ret)
2544                         goto failed;
2545         }
2546
2547         return 0;
2548
2549 failed:
2550         amdgpu_device_attr_remove_groups(adev, attr_list);
2551
2552         return ret;
2553 }
2554
2555 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2556                                              struct list_head *attr_list)
2557 {
2558         struct amdgpu_device_attr_entry *entry, *entry_tmp;
2559
2560         if (list_empty(attr_list))
2561                 return;
2562
2563         list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2564                 amdgpu_device_attr_remove(adev, entry->attr);
2565                 list_del(&entry->entry);
2566                 kfree(entry);
2567         }
2568 }
2569
2570 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2571                                       struct device_attribute *attr,
2572                                       char *buf)
2573 {
2574         struct amdgpu_device *adev = dev_get_drvdata(dev);
2575         int channel = to_sensor_dev_attr(attr)->index;
2576         int r, temp = 0;
2577
2578         if (channel >= PP_TEMP_MAX)
2579                 return -EINVAL;
2580
2581         switch (channel) {
2582         case PP_TEMP_JUNCTION:
2583                 /* get current junction temperature */
2584                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2585                                            (void *)&temp);
2586                 break;
2587         case PP_TEMP_EDGE:
2588                 /* get current edge temperature */
2589                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2590                                            (void *)&temp);
2591                 break;
2592         case PP_TEMP_MEM:
2593                 /* get current memory temperature */
2594                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2595                                            (void *)&temp);
2596                 break;
2597         default:
2598                 r = -EINVAL;
2599                 break;
2600         }
2601
2602         if (r)
2603                 return r;
2604
2605         return sysfs_emit(buf, "%d\n", temp);
2606 }
2607
2608 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2609                                              struct device_attribute *attr,
2610                                              char *buf)
2611 {
2612         struct amdgpu_device *adev = dev_get_drvdata(dev);
2613         int hyst = to_sensor_dev_attr(attr)->index;
2614         int temp;
2615
2616         if (hyst)
2617                 temp = adev->pm.dpm.thermal.min_temp;
2618         else
2619                 temp = adev->pm.dpm.thermal.max_temp;
2620
2621         return sysfs_emit(buf, "%d\n", temp);
2622 }
2623
2624 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2625                                              struct device_attribute *attr,
2626                                              char *buf)
2627 {
2628         struct amdgpu_device *adev = dev_get_drvdata(dev);
2629         int hyst = to_sensor_dev_attr(attr)->index;
2630         int temp;
2631
2632         if (hyst)
2633                 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2634         else
2635                 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2636
2637         return sysfs_emit(buf, "%d\n", temp);
2638 }
2639
2640 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2641                                              struct device_attribute *attr,
2642                                              char *buf)
2643 {
2644         struct amdgpu_device *adev = dev_get_drvdata(dev);
2645         int hyst = to_sensor_dev_attr(attr)->index;
2646         int temp;
2647
2648         if (hyst)
2649                 temp = adev->pm.dpm.thermal.min_mem_temp;
2650         else
2651                 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2652
2653         return sysfs_emit(buf, "%d\n", temp);
2654 }
2655
2656 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2657                                              struct device_attribute *attr,
2658                                              char *buf)
2659 {
2660         int channel = to_sensor_dev_attr(attr)->index;
2661
2662         if (channel >= PP_TEMP_MAX)
2663                 return -EINVAL;
2664
2665         return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2666 }
2667
2668 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2669                                              struct device_attribute *attr,
2670                                              char *buf)
2671 {
2672         struct amdgpu_device *adev = dev_get_drvdata(dev);
2673         int channel = to_sensor_dev_attr(attr)->index;
2674         int temp = 0;
2675
2676         if (channel >= PP_TEMP_MAX)
2677                 return -EINVAL;
2678
2679         switch (channel) {
2680         case PP_TEMP_JUNCTION:
2681                 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2682                 break;
2683         case PP_TEMP_EDGE:
2684                 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2685                 break;
2686         case PP_TEMP_MEM:
2687                 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2688                 break;
2689         }
2690
2691         return sysfs_emit(buf, "%d\n", temp);
2692 }
2693
2694 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2695                                             struct device_attribute *attr,
2696                                             char *buf)
2697 {
2698         struct amdgpu_device *adev = dev_get_drvdata(dev);
2699         u32 pwm_mode = 0;
2700         int ret;
2701
2702         if (amdgpu_in_reset(adev))
2703                 return -EPERM;
2704         if (adev->in_suspend && !adev->in_runpm)
2705                 return -EPERM;
2706
2707         ret = pm_runtime_get_if_active(adev->dev);
2708         if (ret <= 0)
2709                 return ret ?: -EPERM;
2710
2711         ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2712
2713         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2714
2715         if (ret)
2716                 return -EINVAL;
2717
2718         return sysfs_emit(buf, "%u\n", pwm_mode);
2719 }
2720
2721 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2722                                             struct device_attribute *attr,
2723                                             const char *buf,
2724                                             size_t count)
2725 {
2726         struct amdgpu_device *adev = dev_get_drvdata(dev);
2727         int err, ret;
2728         u32 pwm_mode;
2729         int value;
2730
2731         if (amdgpu_in_reset(adev))
2732                 return -EPERM;
2733         if (adev->in_suspend && !adev->in_runpm)
2734                 return -EPERM;
2735
2736         err = kstrtoint(buf, 10, &value);
2737         if (err)
2738                 return err;
2739
2740         if (value == 0)
2741                 pwm_mode = AMD_FAN_CTRL_NONE;
2742         else if (value == 1)
2743                 pwm_mode = AMD_FAN_CTRL_MANUAL;
2744         else if (value == 2)
2745                 pwm_mode = AMD_FAN_CTRL_AUTO;
2746         else
2747                 return -EINVAL;
2748
2749         ret = pm_runtime_resume_and_get(adev->dev);
2750         if (ret < 0)
2751                 return ret;
2752
2753         ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2754
2755         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2756         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2757
2758         if (ret)
2759                 return -EINVAL;
2760
2761         return count;
2762 }
2763
2764 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2765                                          struct device_attribute *attr,
2766                                          char *buf)
2767 {
2768         return sysfs_emit(buf, "%i\n", 0);
2769 }
2770
2771 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2772                                          struct device_attribute *attr,
2773                                          char *buf)
2774 {
2775         return sysfs_emit(buf, "%i\n", 255);
2776 }
2777
2778 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2779                                      struct device_attribute *attr,
2780                                      const char *buf, size_t count)
2781 {
2782         struct amdgpu_device *adev = dev_get_drvdata(dev);
2783         int err;
2784         u32 value;
2785         u32 pwm_mode;
2786
2787         if (amdgpu_in_reset(adev))
2788                 return -EPERM;
2789         if (adev->in_suspend && !adev->in_runpm)
2790                 return -EPERM;
2791
2792         err = kstrtou32(buf, 10, &value);
2793         if (err)
2794                 return err;
2795
2796         err = pm_runtime_resume_and_get(adev->dev);
2797         if (err < 0)
2798                 return err;
2799
2800         err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2801         if (err)
2802                 goto out;
2803
2804         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2805                 pr_info("manual fan speed control should be enabled first\n");
2806                 err = -EINVAL;
2807                 goto out;
2808         }
2809
2810         err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2811
2812 out:
2813         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2814         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2815
2816         if (err)
2817                 return err;
2818
2819         return count;
2820 }
2821
2822 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2823                                      struct device_attribute *attr,
2824                                      char *buf)
2825 {
2826         struct amdgpu_device *adev = dev_get_drvdata(dev);
2827         int err;
2828         u32 speed = 0;
2829
2830         if (amdgpu_in_reset(adev))
2831                 return -EPERM;
2832         if (adev->in_suspend && !adev->in_runpm)
2833                 return -EPERM;
2834
2835         err = pm_runtime_get_if_active(adev->dev);
2836         if (err <= 0)
2837                 return err ?: -EPERM;
2838
2839         err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2840
2841         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2842
2843         if (err)
2844                 return err;
2845
2846         return sysfs_emit(buf, "%i\n", speed);
2847 }
2848
2849 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2850                                            struct device_attribute *attr,
2851                                            char *buf)
2852 {
2853         struct amdgpu_device *adev = dev_get_drvdata(dev);
2854         int err;
2855         u32 speed = 0;
2856
2857         if (amdgpu_in_reset(adev))
2858                 return -EPERM;
2859         if (adev->in_suspend && !adev->in_runpm)
2860                 return -EPERM;
2861
2862         err = pm_runtime_get_if_active(adev->dev);
2863         if (err <= 0)
2864                 return err ?: -EPERM;
2865
2866         err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2867
2868         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2869
2870         if (err)
2871                 return err;
2872
2873         return sysfs_emit(buf, "%i\n", speed);
2874 }
2875
2876 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2877                                          struct device_attribute *attr,
2878                                          char *buf)
2879 {
2880         struct amdgpu_device *adev = dev_get_drvdata(dev);
2881         u32 min_rpm = 0;
2882         int r;
2883
2884         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2885                                    (void *)&min_rpm);
2886
2887         if (r)
2888                 return r;
2889
2890         return sysfs_emit(buf, "%d\n", min_rpm);
2891 }
2892
2893 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2894                                          struct device_attribute *attr,
2895                                          char *buf)
2896 {
2897         struct amdgpu_device *adev = dev_get_drvdata(dev);
2898         u32 max_rpm = 0;
2899         int r;
2900
2901         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2902                                    (void *)&max_rpm);
2903
2904         if (r)
2905                 return r;
2906
2907         return sysfs_emit(buf, "%d\n", max_rpm);
2908 }
2909
2910 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2911                                            struct device_attribute *attr,
2912                                            char *buf)
2913 {
2914         struct amdgpu_device *adev = dev_get_drvdata(dev);
2915         int err;
2916         u32 rpm = 0;
2917
2918         if (amdgpu_in_reset(adev))
2919                 return -EPERM;
2920         if (adev->in_suspend && !adev->in_runpm)
2921                 return -EPERM;
2922
2923         err = pm_runtime_get_if_active(adev->dev);
2924         if (err <= 0)
2925                 return err ?: -EPERM;
2926
2927         err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2928
2929         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2930
2931         if (err)
2932                 return err;
2933
2934         return sysfs_emit(buf, "%i\n", rpm);
2935 }
2936
2937 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2938                                      struct device_attribute *attr,
2939                                      const char *buf, size_t count)
2940 {
2941         struct amdgpu_device *adev = dev_get_drvdata(dev);
2942         int err;
2943         u32 value;
2944         u32 pwm_mode;
2945
2946         if (amdgpu_in_reset(adev))
2947                 return -EPERM;
2948         if (adev->in_suspend && !adev->in_runpm)
2949                 return -EPERM;
2950
2951         err = kstrtou32(buf, 10, &value);
2952         if (err)
2953                 return err;
2954
2955         err = pm_runtime_resume_and_get(adev->dev);
2956         if (err < 0)
2957                 return err;
2958
2959         err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2960         if (err)
2961                 goto out;
2962
2963         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2964                 err = -ENODATA;
2965                 goto out;
2966         }
2967
2968         err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2969
2970 out:
2971         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2972         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2973
2974         if (err)
2975                 return err;
2976
2977         return count;
2978 }
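/*
 * Note: as the AMD_FAN_CTRL_MANUAL check above implies, fan1_target only
 * takes effect while manual fan control is selected. A hypothetical
 * user-space sequence (the hwmon index varies per system):
 *
 *   # echo 1 > /sys/class/hwmon/hwmonX/fan1_enable    (manual mode)
 *   # echo 3000 > /sys/class/hwmon/hwmonX/fan1_target (request 3000 RPM)
 */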
2979
2980 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2981                                             struct device_attribute *attr,
2982                                             char *buf)
2983 {
2984         struct amdgpu_device *adev = dev_get_drvdata(dev);
2985         u32 pwm_mode = 0;
2986         int ret;
2987
2988         if (amdgpu_in_reset(adev))
2989                 return -EPERM;
2990         if (adev->in_suspend && !adev->in_runpm)
2991                 return -EPERM;
2992
2993         ret = pm_runtime_get_if_active(adev->dev);
2994         if (ret <= 0)
2995                 return ret ?: -EPERM;
2996
2997         ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2998
2999         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3000
3001         if (ret)
3002                 return -EINVAL;
3003
3004         return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3005 }
3006
3007 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3008                                             struct device_attribute *attr,
3009                                             const char *buf,
3010                                             size_t count)
3011 {
3012         struct amdgpu_device *adev = dev_get_drvdata(dev);
3013         int err;
3014         int value;
3015         u32 pwm_mode;
3016
3017         if (amdgpu_in_reset(adev))
3018                 return -EPERM;
3019         if (adev->in_suspend && !adev->in_runpm)
3020                 return -EPERM;
3021
3022         err = kstrtoint(buf, 10, &value);
3023         if (err)
3024                 return err;
3025
3026         if (value == 0)
3027                 pwm_mode = AMD_FAN_CTRL_AUTO;
3028         else if (value == 1)
3029                 pwm_mode = AMD_FAN_CTRL_MANUAL;
3030         else
3031                 return -EINVAL;
3032
3033         err = pm_runtime_resume_and_get(adev->dev);
3034         if (err < 0)
3035                 return err;
3036
3037         err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3038
3039         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3040         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3041
3042         if (err)
3043                 return -EINVAL;
3044
3045         return count;
3046 }
3047
3048 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3049                                         struct device_attribute *attr,
3050                                         char *buf)
3051 {
3052         struct amdgpu_device *adev = dev_get_drvdata(dev);
3053         u32 vddgfx;
3054         int r;
3055
3056         /* get the voltage */
3057         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3058                                    (void *)&vddgfx);
3059         if (r)
3060                 return r;
3061
3062         return sysfs_emit(buf, "%d\n", vddgfx);
3063 }
3064
3065 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3066                                               struct device_attribute *attr,
3067                                               char *buf)
3068 {
3069         return sysfs_emit(buf, "vddgfx\n");
3070 }
3071
3072 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3073                                        struct device_attribute *attr,
3074                                        char *buf)
3075 {
3076         struct amdgpu_device *adev = dev_get_drvdata(dev);
3077         u32 vddnb;
3078         int r;
3079
3080         /* only APUs have vddnb */
3081         if (!(adev->flags & AMD_IS_APU))
3082                 return -EINVAL;
3083
3084         /* get the voltage */
3085         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3086                                    (void *)&vddnb);
3087         if (r)
3088                 return r;
3089
3090         return sysfs_emit(buf, "%d\n", vddnb);
3091 }
3092
3093 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3094                                               struct device_attribute *attr,
3095                                               char *buf)
3096 {
3097         return sysfs_emit(buf, "vddnb\n");
3098 }
3099
3100 static int amdgpu_hwmon_get_power(struct device *dev,
3101                                   enum amd_pp_sensors sensor)
3102 {
3103         struct amdgpu_device *adev = dev_get_drvdata(dev);
3104         unsigned int uw;
3105         u32 query = 0;
3106         int r;
3107
3108         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
3109         if (r)
3110                 return r;
3111
3112         /* convert to microwatts */
3113         uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3114
3115         return uw;
3116 }
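/*
 * Worked example for the conversion above: a raw query of 0x2a80 has
 * 0x2a (42) in the upper bits and 0x80 (128) in the low byte, yielding
 * 42 * 1000000 + 128 * 1000 = 42128000 microwatts.
 */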
3117
3118 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3119                                            struct device_attribute *attr,
3120                                            char *buf)
3121 {
3122         ssize_t val;
3123
3124         val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3125         if (val < 0)
3126                 return val;
3127
3128         return sysfs_emit(buf, "%zd\n", val);
3129 }
3130
3131 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3132                                              struct device_attribute *attr,
3133                                              char *buf)
3134 {
3135         ssize_t val;
3136
3137         val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3138         if (val < 0)
3139                 return val;
3140
3141         return sysfs_emit(buf, "%zd\n", val);
3142 }
3143
3144 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3145                                         struct device_attribute *attr,
3146                                         char *buf,
3147                                         enum pp_power_limit_level pp_limit_level)
3148 {
3149         struct amdgpu_device *adev = dev_get_drvdata(dev);
3150         enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3151         uint32_t limit;
3152         ssize_t size;
3153         int r;
3154
3155         if (amdgpu_in_reset(adev))
3156                 return -EPERM;
3157         if (adev->in_suspend && !adev->in_runpm)
3158                 return -EPERM;
3159
3160         r = pm_runtime_get_if_active(adev->dev);
3161         if (r <= 0)
3162                 return r ?: -EPERM;
3163
3164         r = amdgpu_dpm_get_power_limit(adev, &limit,
3165                                       pp_limit_level, power_type);
3166
3167         if (!r)
3168                 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3169         else
3170                 size = sysfs_emit(buf, "\n");
3171
3172         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3173
3174         return size;
3175 }
3176
3177 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3178                                          struct device_attribute *attr,
3179                                          char *buf)
3180 {
3181         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3182 }
3183
3184 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3185                                          struct device_attribute *attr,
3186                                          char *buf)
3187 {
3188         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3189
3190 }
3191
3192 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3193                                          struct device_attribute *attr,
3194                                          char *buf)
3195 {
3196         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3197
3198 }
3199
3200 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3201                                          struct device_attribute *attr,
3202                                          char *buf)
3203 {
3204         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3205
3206 }
3207
3208 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3209                                          struct device_attribute *attr,
3210                                          char *buf)
3211 {
3212         struct amdgpu_device *adev = dev_get_drvdata(dev);
3213         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3214
3215         if (gc_ver == IP_VERSION(10, 3, 1))
3216                 return sysfs_emit(buf, "%s\n",
3217                                   to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3218                                   "fastPPT" : "slowPPT");
3219         else
3220                 return sysfs_emit(buf, "PPT\n");
3221 }
3222
3223 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3224                 struct device_attribute *attr,
3225                 const char *buf,
3226                 size_t count)
3227 {
3228         struct amdgpu_device *adev = dev_get_drvdata(dev);
3229         int limit_type = to_sensor_dev_attr(attr)->index;
3230         int err;
3231         u32 value;
3232
3233         if (amdgpu_in_reset(adev))
3234                 return -EPERM;
3235         if (adev->in_suspend && !adev->in_runpm)
3236                 return -EPERM;
3237
3238         if (amdgpu_sriov_vf(adev))
3239                 return -EINVAL;
3240
3241         err = kstrtou32(buf, 10, &value);
3242         if (err)
3243                 return err;
3244
3245         value = value / 1000000; /* convert from microwatts to watts */
3246         value |= limit_type << 24;
3247
3248         err = pm_runtime_resume_and_get(adev->dev);
3249         if (err < 0)
3250                 return err;
3251
3252         err = amdgpu_dpm_set_power_limit(adev, value);
3253
3254         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3255         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3256
3257         if (err)
3258                 return err;
3259
3260         return count;
3261 }
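/*
 * As the division above implies, the cap is written in microwatts and
 * truncated to whole watts before being handed to the firmware. A
 * hypothetical example capping the board at 200 W (the hwmon index
 * varies per system):
 *
 *   # echo 200000000 > /sys/class/hwmon/hwmonX/power1_cap
 */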
3262
3263 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3264                                       struct device_attribute *attr,
3265                                       char *buf)
3266 {
3267         struct amdgpu_device *adev = dev_get_drvdata(dev);
3268         uint32_t sclk;
3269         int r;
3270
3271         /* get the sclk */
3272         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3273                                    (void *)&sclk);
3274         if (r)
3275                 return r;
3276
3277         return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3278 }
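/*
 * The "* 10 * 1000" scaling above implies the sensor reports the clock
 * in 10 kHz units: e.g. a raw reading of 180000 is emitted as
 * 1800000000 (1.8 GHz). The same scaling is applied to the memory
 * clock below.
 */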
3279
3280 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3281                                             struct device_attribute *attr,
3282                                             char *buf)
3283 {
3284         return sysfs_emit(buf, "sclk\n");
3285 }
3286
3287 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3288                                       struct device_attribute *attr,
3289                                       char *buf)
3290 {
3291         struct amdgpu_device *adev = dev_get_drvdata(dev);
3292         uint32_t mclk;
3293         int r;
3294
3295         /* get the mclk */
3296         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3297                                    (void *)&mclk);
3298         if (r)
3299                 return r;
3300
3301         return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3302 }
3303
3304 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3305                                             struct device_attribute *attr,
3306                                             char *buf)
3307 {
3308         return sysfs_emit(buf, "mclk\n");
3309 }
3310
3311 /**
3312  * DOC: hwmon
3313  *
3314  * The amdgpu driver exposes the following sensor interfaces:
3315  *
3316  * - GPU temperature (via the on-die sensor)
3317  *
3318  * - GPU voltage
3319  *
3320  * - Northbridge voltage (APUs only)
3321  *
3322  * - GPU power
3323  *
3324  * - GPU fan
3325  *
3326  * - GPU gfx/compute engine clock
3327  *
3328  * - GPU memory clock (dGPU only)
3329  *
3330  * hwmon interfaces for GPU temperature:
3331  *
3332  * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3333  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3334  *
3335  * - temp[1-3]_label: temperature channel label
3336  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3337  *
3338  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3339  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3340  *
3341  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3342  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3343  *
3344  * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3345  *   - these are supported on SOC15 dGPUs only
3346  *
3347  * hwmon interfaces for GPU voltage:
3348  *
3349  * - in0_input: the voltage on the GPU in millivolts
3350  *
3351  * - in1_input: the voltage on the Northbridge in millivolts
3352  *
3353  * hwmon interfaces for GPU power:
3354  *
3355  * - power1_average: average power used by the SoC in microWatts.  On APUs this includes the CPU.
3356  *
3357  * - power1_input: instantaneous power used by the SoC in microWatts.  On APUs this includes the CPU.
3358  *
3359  * - power1_cap_min: minimum cap supported in microWatts
3360  *
3361  * - power1_cap_max: maximum cap supported in microWatts
3362  *
3363  * - power1_cap: selected power cap in microWatts
3364  *
3365  * hwmon interfaces for GPU fan:
3366  *
3367  * - pwm1: pulse width modulation fan level (0-255)
3368  *
3369  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3370  *
3371  * - pwm1_min: pulse width modulation fan control minimum level (0)
3372  *
3373  * - pwm1_max: pulse width modulation fan control maximum level (255)
3374  *
3375  * - fan1_min: minimum fan speed, in revolutions/min (RPM)
3376  *
3377  * - fan1_max: maximum fan speed, in revolutions/min (RPM)
3378  *
3379  * - fan1_input: fan speed in RPM
3380  *
3381  * - fan[1-\*]_target: desired fan speed, in revolutions/min (RPM)
3382  *
3383  * - fan[1-\*]_enable: enable or disable the sensor (1: enable, 0: disable)
3384  *
3385  * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces
3386  *       at the same time, as the former setting will be overridden.
3387  *
3388  * hwmon interfaces for GPU clocks:
3389  *
3390  * - freq1_input: the gfx/compute clock in hertz
3391  *
3392  * - freq2_input: the memory clock in hertz
3393  *
3394  * You can use hwmon tools like sensors to view this information on your system.
3395  *
3396  */
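/*
 * A minimal sketch of reading these interfaces from user space (the
 * hwmon index is hypothetical and varies across systems):
 *
 *   $ cat /sys/class/hwmon/hwmonX/temp1_input
 *   45000                (45 degrees Celsius on the edge sensor)
 *   $ cat /sys/class/hwmon/hwmonX/freq1_input
 *   1800000000           (1.8 GHz gfx clock)
 *   # echo 2 > /sys/class/hwmon/hwmonX/pwm1_enable   (automatic fan control)
 */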
3397
3398 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3399 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3400 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3401 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3402 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3403 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3404 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3405 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3406 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3407 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3408 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3409 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3410 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3411 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3412 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3413 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3414 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3415 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3416 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3417 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3418 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3419 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3420 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3421 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3422 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3423 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3424 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3425 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3426 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3427 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3428 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3429 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3430 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3431 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3432 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3433 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3434 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3435 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3436 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3437 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3438 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3439 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3440 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3441 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3442 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3443
3444 static struct attribute *hwmon_attributes[] = {
3445         &sensor_dev_attr_temp1_input.dev_attr.attr,
3446         &sensor_dev_attr_temp1_crit.dev_attr.attr,
3447         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3448         &sensor_dev_attr_temp2_input.dev_attr.attr,
3449         &sensor_dev_attr_temp2_crit.dev_attr.attr,
3450         &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3451         &sensor_dev_attr_temp3_input.dev_attr.attr,
3452         &sensor_dev_attr_temp3_crit.dev_attr.attr,
3453         &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3454         &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3455         &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3456         &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3457         &sensor_dev_attr_temp1_label.dev_attr.attr,
3458         &sensor_dev_attr_temp2_label.dev_attr.attr,
3459         &sensor_dev_attr_temp3_label.dev_attr.attr,
3460         &sensor_dev_attr_pwm1.dev_attr.attr,
3461         &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3462         &sensor_dev_attr_pwm1_min.dev_attr.attr,
3463         &sensor_dev_attr_pwm1_max.dev_attr.attr,
3464         &sensor_dev_attr_fan1_input.dev_attr.attr,
3465         &sensor_dev_attr_fan1_min.dev_attr.attr,
3466         &sensor_dev_attr_fan1_max.dev_attr.attr,
3467         &sensor_dev_attr_fan1_target.dev_attr.attr,
3468         &sensor_dev_attr_fan1_enable.dev_attr.attr,
3469         &sensor_dev_attr_in0_input.dev_attr.attr,
3470         &sensor_dev_attr_in0_label.dev_attr.attr,
3471         &sensor_dev_attr_in1_input.dev_attr.attr,
3472         &sensor_dev_attr_in1_label.dev_attr.attr,
3473         &sensor_dev_attr_power1_average.dev_attr.attr,
3474         &sensor_dev_attr_power1_input.dev_attr.attr,
3475         &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3476         &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3477         &sensor_dev_attr_power1_cap.dev_attr.attr,
3478         &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3479         &sensor_dev_attr_power1_label.dev_attr.attr,
3480         &sensor_dev_attr_power2_average.dev_attr.attr,
3481         &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3482         &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3483         &sensor_dev_attr_power2_cap.dev_attr.attr,
3484         &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3485         &sensor_dev_attr_power2_label.dev_attr.attr,
3486         &sensor_dev_attr_freq1_input.dev_attr.attr,
3487         &sensor_dev_attr_freq1_label.dev_attr.attr,
3488         &sensor_dev_attr_freq2_input.dev_attr.attr,
3489         &sensor_dev_attr_freq2_label.dev_attr.attr,
3490         NULL
3491 };
3492
3493 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3494                                         struct attribute *attr, int index)
3495 {
3496         struct device *dev = kobj_to_dev(kobj);
3497         struct amdgpu_device *adev = dev_get_drvdata(dev);
3498         umode_t effective_mode = attr->mode;
3499         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3500         uint32_t tmp;
3501
3502         /* under pp one vf mode, management of hwmon attributes is not supported */
3503         if (amdgpu_sriov_is_pp_one_vf(adev))
3504                 effective_mode &= ~S_IWUSR;
3505
3506         /* Skip fan attributes if fan is not present */
3507         if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3508             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3509             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3510             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3511             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3512             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3513             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3514             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3515             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3516                 return 0;
3517
3518         /* Skip fan attributes on APU */
3519         if ((adev->flags & AMD_IS_APU) &&
3520             (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3521              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3522              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3523              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3524              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3525              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3526              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3527              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3528              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3529                 return 0;
3530
3531         /* Skip crit temp on APU */
3532         if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3533             (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
3534             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3535              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3536                 return 0;
3537
3538         /* Skip limit attributes if DPM is not enabled */
3539         if (!adev->pm.dpm_enabled &&
3540             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3541              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3542              attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3543              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3544              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3545              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3546              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3547              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3548              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3549              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3550              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3551                 return 0;
3552
3553         /* mask fan attributes if we have no bindings to expose for this asic */
3554         if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3555               attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3556             ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3557              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3558                 effective_mode &= ~S_IRUGO;
3559
3560         if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3561               attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3562               ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3563               attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3564                 effective_mode &= ~S_IWUSR;
3565
3566         /* not implemented yet for APUs other than GC 10.3.1 (Vangogh), 9.4.3 and 9.4.4 */
3567         if (((adev->family == AMDGPU_FAMILY_SI) ||
3568              ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3569               (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
3570             (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3571              attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3572              attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3573              attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3574                 return 0;
3575
3576         /* not implemented yet for APUs with GC versions below 9.3.0 (Renoir) */
3577         if (((adev->family == AMDGPU_FAMILY_SI) ||
3578              ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3579             (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3580                 return 0;
3581
3582         /* not all products support both average and instantaneous */
3583         if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3584             amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
3585                 return 0;
3586         if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3587             amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
3588                 return 0;
3589
3590         /* hide max/min values if we can't both query and manage the fan */
3591         if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3592               (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3593               (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3594               (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3595             (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3596              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3597                 return 0;
3598
3599         if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3600              (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3601              (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3602              attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3603                 return 0;
3604
3605         if ((adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
3606              adev->family == AMDGPU_FAMILY_KV ||        /* not implemented yet */
3607              (gc_ver == IP_VERSION(9, 4, 3) ||
3608               gc_ver == IP_VERSION(9, 4, 4))) &&
3609             (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3610              attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3611                 return 0;
3612
3613         /* only APUs other than GC 9.4.3/9.4.4 have vddnb */
3614         if ((!(adev->flags & AMD_IS_APU) ||
3615              (gc_ver == IP_VERSION(9, 4, 3) ||
3616               gc_ver == IP_VERSION(9, 4, 4))) &&
3617             (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3618              attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3619                 return 0;
3620
3621         /* no mclk on APUs other than GC 9.4.3 */
3622         if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3623             (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3624              attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3625                 return 0;
3626
3627         if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3628             (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3629             (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3630              attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3631              attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3632              attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3633              attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3634              attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3635                 return 0;
3636
3637         /* hotspot temperature for GC 9.4.3/9.4.4 */
3638         if (gc_ver == IP_VERSION(9, 4, 3) ||
3639             gc_ver == IP_VERSION(9, 4, 4)) {
3640                 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3641                     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3642                     attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3643                         return 0;
3644
3645                 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3646                     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3647                         return attr->mode;
3648         }
3649
3650         /* only SOC15 dGPUs support hotspot and mem temperatures */
3651         if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3652             (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3653              attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3654              attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3655              attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3656              attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3657                 return 0;
3658
3659         /* only Vangogh has fast PPT limit and power labels */
3660         if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3661             (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3662              attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3663              attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3664              attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3665              attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3666              attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3667                 return 0;
3668
3669         return effective_mode;
3670 }
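/*
 * Standard sysfs semantics apply to the callback above: returning 0
 * hides an attribute entirely, while masking S_IRUGO or S_IWUSR off the
 * effective mode keeps the file visible but makes it unreadable or
 * read-only, respectively.
 */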
3671
3672 static const struct attribute_group hwmon_attrgroup = {
3673         .attrs = hwmon_attributes,
3674         .is_visible = hwmon_attributes_visible,
3675 };
3676
3677 static const struct attribute_group *hwmon_groups[] = {
3678         &hwmon_attrgroup,
3679         NULL
3680 };
3681
3682 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3683                                        enum pp_clock_type od_type,
3684                                        char *buf)
3685 {
3686         int size = 0;
3687         int ret;
3688
3689         if (amdgpu_in_reset(adev))
3690                 return -EPERM;
3691         if (adev->in_suspend && !adev->in_runpm)
3692                 return -EPERM;
3693
3694         ret = pm_runtime_get_if_active(adev->dev);
3695         if (ret <= 0)
3696                 return ret ?: -EPERM;
3697
3698         size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3699         if (size == 0)
3700                 size = sysfs_emit(buf, "\n");
3701
3702         pm_runtime_put_autosuspend(adev->dev);
3703
3704         return size;
3705 }
3706
3707 static int parse_input_od_command_lines(const char *buf,
3708                                         size_t count,
3709                                         u32 *type,
3710                                         long *params,
3711                                         uint32_t *num_of_params)
3712 {
3713         const char delimiter[3] = {' ', '\n', '\0'};
3714         uint32_t parameter_size = 0;
3715         char buf_cpy[128] = {0};
3716         char *tmp_str, *sub_str;
3717         int ret;
3718
3719         if (count > sizeof(buf_cpy) - 1)
3720                 return -EINVAL;
3721
3722         memcpy(buf_cpy, buf, count);
3723         tmp_str = buf_cpy;
3724
3725         /* skip leading spaces */
3726         while (isspace(*tmp_str))
3727                 tmp_str++;
3728
3729         switch (*tmp_str) {
3730         case 'c':
3731                 *type = PP_OD_COMMIT_DPM_TABLE;
3732                 return 0;
3733         case 'r':
3734                 params[parameter_size] = *type;
3735                 *num_of_params = 1;
3736                 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3737                 return 0;
3738         default:
3739                 break;
3740         }
3741
3742         while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3743                 if (strlen(sub_str) == 0)
3744                         continue;
3745
3746                 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3747                 if (ret)
3748                         return -EINVAL;
3749                 parameter_size++;
3750
3751                 while (isspace(*tmp_str))
3752                         tmp_str++;
3753         }
3754
3755         *num_of_params = parameter_size;
3756
3757         return 0;
3758 }
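/*
 * Illustrative inputs for the parser above: "c" selects
 * PP_OD_COMMIT_DPM_TABLE, "r" selects PP_OD_RESTORE_DEFAULT_TABLE
 * (recording the original command type as its single parameter), and a
 * line such as "2 60 128" yields three numeric parameters.
 */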
3759
3760 static int
3761 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3762                                      enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3763                                      const char *in_buf,
3764                                      size_t count)
3765 {
3766         uint32_t parameter_size = 0;
3767         long parameter[64];
3768         int ret;
3769
3770         if (amdgpu_in_reset(adev))
3771                 return -EPERM;
3772         if (adev->in_suspend && !adev->in_runpm)
3773                 return -EPERM;
3774
3775         ret = parse_input_od_command_lines(in_buf,
3776                                            count,
3777                                            &cmd_type,
3778                                            parameter,
3779                                            &parameter_size);
3780         if (ret)
3781                 return ret;
3782
3783         ret = pm_runtime_resume_and_get(adev->dev);
3784         if (ret < 0)
3785                 return ret;
3786
3787         ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3788                                             cmd_type,
3789                                             parameter,
3790                                             parameter_size);
3791         if (ret)
3792                 goto err_out;
3793
3794         if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3795                 ret = amdgpu_dpm_dispatch_task(adev,
3796                                                AMD_PP_TASK_READJUST_POWER_STATE,
3797                                                NULL);
3798                 if (ret)
3799                         goto err_out;
3800         }
3801
3802         pm_runtime_mark_last_busy(adev->dev);
3803         pm_runtime_put_autosuspend(adev->dev);
3804
3805         return count;
3806
3807 err_out:
3808         pm_runtime_mark_last_busy(adev->dev);
3809         pm_runtime_put_autosuspend(adev->dev);
3810
3811         return ret;
3812 }
3813
3814 /**
3815  * DOC: fan_curve
3816  *
3817  * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3818  * control curve line.
3819  *
3820  * Reading back the file shows you the current settings (temperature in degrees
3821  * Celsius and fan speed in PWM) applied to every anchor point of the curve line,
3822  * and their permitted ranges if changeable.
3823  *
3824  * Writing a string with the format "anchor_point_index temperature
3825  * fan_speed_in_pwm" to the file changes the settings for the specified anchor
3826  * point accordingly.
3827  *
3828  * When you have finished the editing, write "c" (commit) to the file to commit
3829  * your changes.
3830  *
3831  * If you want to reset to the default values, write "r" (reset) to the file to
3832  * reset them.
3833  *
3834  * There are two fan control modes supported: auto and manual. In auto mode,
3835  * the PMFW handles the fan speed control (how the fan speed reacts to ASIC
3836  * temperature), while in manual mode, users can set their own fan curve line
3837  * as described here. Normally the ASIC boots up in auto mode. Any settings
3838  * made via this interface will switch the fan control to manual mode
3839  * implicitly.
3840  */
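/*
 * A hypothetical editing session (the gpu_od sysfs location below is an
 * assumption; it differs per card):
 *
 *   $ cat /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve
 *   # echo "2 60 128" > fan_curve   (anchor point 2: 60 degrees C, PWM 128)
 *   # echo "c" > fan_curve          (commit the change)
 *   # echo "r" > fan_curve          (or revert to the defaults)
 */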
3841 static ssize_t fan_curve_show(struct kobject *kobj,
3842                               struct kobj_attribute *attr,
3843                               char *buf)
3844 {
3845         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3846         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3847
3848         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
3849 }
3850
3851 static ssize_t fan_curve_store(struct kobject *kobj,
3852                                struct kobj_attribute *attr,
3853                                const char *buf,
3854                                size_t count)
3855 {
3856         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3857         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3858
3859         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3860                                                              PP_OD_EDIT_FAN_CURVE,
3861                                                              buf,
3862                                                              count);
3863 }
3864
3865 static umode_t fan_curve_visible(struct amdgpu_device *adev)
3866 {
3867         umode_t umode = 0000;
3868
3869         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
3870                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3871
3872         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
3873                 umode |= S_IWUSR;
3874
3875         return umode;
3876 }
3877
3878 /**
3879  * DOC: acoustic_limit_rpm_threshold
3880  *
3881  * The amdgpu driver provides a sysfs API for checking and adjusting the
3882  * acoustic limit in RPM for fan control.
3883  *
3884  * Reading back the file shows you the current setting and the permitted
3885  * ranges if changeable.
3886  *
3887  * Writing an integer to the file changes the setting accordingly.
3888  *
3889  * When you have finished the editing, write "c" (commit) to the file to commit
3890  * your changes.
3891  *
3892  * If you want to reset to the default value, write "r" (reset) to the file to
3893  * reset it.
3894  *
3895  * This setting works under auto fan control mode only. It adjusts the PMFW's
3896  * limit on the maximum speed, in RPM, at which the fan can spin. Setting via
3897  * this interface will switch the fan control to auto mode implicitly.
3898  */
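/*
 * A sketch of the single-integer write flow, which is shared with the
 * acoustic_target_rpm_threshold, fan_target_temperature and
 * fan_minimum_pwm interfaces below (paths hypothetical):
 *
 *   # echo 3200 > acoustic_limit_rpm_threshold
 *   # echo "c" > acoustic_limit_rpm_threshold   (commit)
 */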
3899 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
3900                                              struct kobj_attribute *attr,
3901                                              char *buf)
3902 {
3903         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3904         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3905
3906         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
3907 }
3908
3909 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
3910                                               struct kobj_attribute *attr,
3911                                               const char *buf,
3912                                               size_t count)
3913 {
3914         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3915         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3916
3917         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3918                                                              PP_OD_EDIT_ACOUSTIC_LIMIT,
3919                                                              buf,
3920                                                              count);
3921 }
3922
3923 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
3924 {
3925         umode_t umode = 0000;
3926
3927         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
3928                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3929
3930         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
3931                 umode |= S_IWUSR;
3932
3933         return umode;
3934 }
3935
3936 /**
3937  * DOC: acoustic_target_rpm_threshold
3938  *
3939  * The amdgpu driver provides a sysfs API for checking and adjusting the
3940  * acoustic target in RPM for fan control.
3941  *
3942  * Reading back the file shows you the current setting and the permitted
3943  * ranges if changeable.
3944  *
3945  * Writing an integer to the file changes the setting accordingly.
3946  *
3947  * When you have finished the editing, write "c" (commit) to the file to commit
3948  * your changes.
3949  *
3950  * If you want to reset to the default value, write "r" (reset) to the file to
3951  * reset it.
3952  *
3953  * This setting works under auto fan control mode only. It can co-exist with
3954  * other settings which also work under auto mode. It adjusts the PMFW's
3955  * limit on the maximum speed, in RPM, at which the fan can spin when the ASIC
3956  * temperature is not greater than the target temperature. Setting via this
3957  * interface will switch the fan control to auto mode implicitly.
3958  */
3959 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
3960                                               struct kobj_attribute *attr,
3961                                               char *buf)
3962 {
3963         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3964         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3965
3966         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
3967 }
3968
3969 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
3970                                                struct kobj_attribute *attr,
3971                                                const char *buf,
3972                                                size_t count)
3973 {
3974         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3975         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3976
3977         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3978                                                              PP_OD_EDIT_ACOUSTIC_TARGET,
3979                                                              buf,
3980                                                              count);
3981 }
3982
3983 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
3984 {
3985         umode_t umode = 0000;
3986
3987         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
3988                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3989
3990         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
3991                 umode |= S_IWUSR;
3992
3993         return umode;
3994 }
3995
3996 /**
3997  * DOC: fan_target_temperature
3998  *
3999  * The amdgpu driver provides a sysfs API for checking and adjusting the
4000  * target temperature in degrees Celsius for fan control.
4001  *
4002  * Reading back the file shows you the current setting and the permitted
4003  * ranges if changeable.
4004  *
4005  * Writing an integer to the file changes the setting accordingly.
4006  *
4007  * When you have finished the editing, write "c" (commit) to the file to commit
4008  * your changes.
4009  *
4010  * If you want to reset to the default value, write "r" (reset) to the file to
4011  * reset it.
4012  *
4013  * This setting works under auto fan control mode only. It can co-exist with
4014  * other settings which also work under auto mode. Paired with the
4015  * acoustic_target_rpm_threshold setting, it defines the maximum speed in
4016  * RPM the fan can spin when the ASIC temperature is not greater than the
4017  * target temperature. Setting via this interface will switch the fan
4018  * control to auto mode implicitly.
4019  */
4020 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4021                                            struct kobj_attribute *attr,
4022                                            char *buf)
4023 {
4024         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4025         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4026
4027         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4028 }
4029
4030 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4031                                             struct kobj_attribute *attr,
4032                                             const char *buf,
4033                                             size_t count)
4034 {
4035         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4036         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4037
4038         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4039                                                              PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4040                                                              buf,
4041                                                              count);
4042 }
4043
4044 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4045 {
4046         umode_t umode = 0000;
4047
4048         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4049                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4050
4051         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4052                 umode |= S_IWUSR;
4053
4054         return umode;
4055 }
4056
4057 /**
4058  * DOC: fan_minimum_pwm
4059  *
4060  * The amdgpu driver provides a sysfs API for checking and adjusting the
4061  * minimum fan speed in PWM.
4062  *
4063  * Reading back the file shows you the current setting and the permitted
4064  * ranges if changeable.
4065  *
4066  * Writing an integer to the file changes the setting accordingly.
4067  *
4068  * When you have finished the editing, write "c" (commit) to the file to commit
4069  * your changes.
4070  *
4071  * If you want to reset to the default value, write "r" (reset) to the file to
4072  * reset it.
4073  *
4074  * This setting works under auto fan control mode only. It can co-exist with
4075  * other settings which also work under auto mode. It adjusts the PMFW's
4076  * floor on the minimum speed, in PWM, at which the fan should spin. Setting
4077  * via this interface will switch the fan control to auto mode implicitly.
4078  */
4079 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4080                                     struct kobj_attribute *attr,
4081                                     char *buf)
4082 {
4083         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4084         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4085
4086         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4087 }
4088
4089 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4090                                      struct kobj_attribute *attr,
4091                                      const char *buf,
4092                                      size_t count)
4093 {
4094         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4095         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4096
4097         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4098                                                              PP_OD_EDIT_FAN_MINIMUM_PWM,
4099                                                              buf,
4100                                                              count);
4101 }
4102
4103 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4104 {
4105         umode_t umode = 0000;
4106
4107         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4108                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4109
4110         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4111                 umode |= S_IWUSR;
4112
4113         return umode;
4114 }
4115
4116 /**
4117  * DOC: fan_zero_rpm_enable
4118  *
4119  * The amdgpu driver provides a sysfs API for checking and adjusting the
4120  * zero RPM feature.
4121  *
4122  * Reading back the file shows you the current setting and the permitted
4123  * ranges if changeable.
4124  *
4125  * Writing an integer to the file changes the setting accordingly.
4126  *
4127  * When you have finished the editing, write "c" (commit) to the file to commit
4128  * your changes.
4129  *
4130  * If you want to reset to the default value, write "r" (reset) to the file to
4131  * reset it.
4132  */
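/*
 * Example (hypothetical; presumably 0 disables and 1 enables the
 * feature, subject to the range the PMFW reports):
 *
 *   # echo 1 > fan_zero_rpm_enable
 *   # echo "c" > fan_zero_rpm_enable
 *
 * The related stop temperature is exposed via
 * fan_zero_rpm_stop_temperature below.
 */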
4133 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4134                                            struct kobj_attribute *attr,
4135                                            char *buf)
4136 {
4137         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4138         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4139
4140         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4141 }
4142
4143 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4144                                             struct kobj_attribute *attr,
4145                                             const char *buf,
4146                                             size_t count)
4147 {
4148         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4149         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4150
4151         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4152                                                              PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4153                                                              buf,
4154                                                              count);
4155 }
4156
4157 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4158 {
4159         umode_t umode = 0000;
4160
4161         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4162                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4163
4164         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4165                 umode |= S_IWUSR;
4166
4167         return umode;
4168 }
4169
4170 /**
4171  * DOC: fan_zero_rpm_stop_temperature
4172  *
4173  * The amdgpu driver provides a sysfs API for checking and adjusting the
4174  * zero RPM stop temperature feature.
4175  *
4176  * Reading back the file shows you the current setting and the permitted
4177  * ranges if changeable.
4178  *
4179  * Writing an integer to the file changes the setting accordingly.
4180  *
4181  * When you have finished the editing, write "c" (commit) to the file to commit
4182  * your changes.
4183  *
4184  * If you want to reset to the default value, write "r" (reset) to the file to
4185  * reset it.
4186  *
4187  * This setting works only if the Zero RPM setting is enabled. It adjusts the
4188  * temperature below which the fan can stop.
4189  */
4190 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4191                                            struct kobj_attribute *attr,
4192                                            char *buf)
4193 {
4194         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4195         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4196
4197         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4198 }
4199
4200 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4201                                             struct kobj_attribute *attr,
4202                                             const char *buf,
4203                                             size_t count)
4204 {
4205         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4206         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4207
4208         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4209                                                              PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4210                                                              buf,
4211                                                              count);
4212 }
4213
4214 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4215 {
4216         umode_t umode = 0000;
4217
4218         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4219                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4220
4221         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4222                 umode |= S_IWUSR;
4223
4224         return umode;
4225 }
4226
4227 static struct od_feature_set amdgpu_od_set = {
4228         .containers = {
4229                 [0] = {
4230                         .name = "fan_ctrl",
4231                         .sub_feature = {
4232                                 [0] = {
4233                                         .name = "fan_curve",
4234                                         .ops = {
4235                                                 .is_visible = fan_curve_visible,
4236                                                 .show = fan_curve_show,
4237                                                 .store = fan_curve_store,
4238                                         },
4239                                 },
4240                                 [1] = {
4241                                         .name = "acoustic_limit_rpm_threshold",
4242                                         .ops = {
4243                                                 .is_visible = acoustic_limit_threshold_visible,
4244                                                 .show = acoustic_limit_threshold_show,
4245                                                 .store = acoustic_limit_threshold_store,
4246                                         },
4247                                 },
4248                                 [2] = {
4249                                         .name = "acoustic_target_rpm_threshold",
4250                                         .ops = {
4251                                                 .is_visible = acoustic_target_threshold_visible,
4252                                                 .show = acoustic_target_threshold_show,
4253                                                 .store = acoustic_target_threshold_store,
4254                                         },
4255                                 },
4256                                 [3] = {
4257                                         .name = "fan_target_temperature",
4258                                         .ops = {
4259                                                 .is_visible = fan_target_temperature_visible,
4260                                                 .show = fan_target_temperature_show,
4261                                                 .store = fan_target_temperature_store,
4262                                         },
4263                                 },
4264                                 [4] = {
4265                                         .name = "fan_minimum_pwm",
4266                                         .ops = {
4267                                                 .is_visible = fan_minimum_pwm_visible,
4268                                                 .show = fan_minimum_pwm_show,
4269                                                 .store = fan_minimum_pwm_store,
4270                                         },
4271                                 },
4272                                 [5] = {
4273                                         .name = "fan_zero_rpm_enable",
4274                                         .ops = {
4275                                                 .is_visible = fan_zero_rpm_enable_visible,
4276                                                 .show = fan_zero_rpm_enable_show,
4277                                                 .store = fan_zero_rpm_enable_store,
4278                                         },
4279                                 },
4280                                 [6] = {
4281                                         .name = "fan_zero_rpm_stop_temperature",
4282                                         .ops = {
4283                                                 .is_visible = fan_zero_rpm_stop_temp_visible,
4284                                                 .show = fan_zero_rpm_stop_temp_show,
4285                                                 .store = fan_zero_rpm_stop_temp_store,
4286                                         },
4287                                 },
4288                         },
4289                 },
4290         },
4291 };
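/*
 * A sketch of the sysfs layout produced from the feature set above on an
 * ASIC that supports every entry (entries whose is_visible callback
 * returns no access mode are omitted at runtime):
 *
 *   gpu_od/
 *     fan_ctrl/
 *       fan_curve
 *       acoustic_limit_rpm_threshold
 *       acoustic_target_rpm_threshold
 *       fan_target_temperature
 *       fan_minimum_pwm
 *       fan_zero_rpm_enable
 *       fan_zero_rpm_stop_temperature
 */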
4292
4293 static void od_kobj_release(struct kobject *kobj)
4294 {
4295         struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4296
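        /* Invoked by the kobject core once the last reference is dropped. */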
4297         kfree(od_kobj);
4298 }
4299
4300 static const struct kobj_type od_ktype = {
4301         .release        = od_kobj_release,
4302         .sysfs_ops      = &kobj_sysfs_ops,
4303 };
4304
4305 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4306 {
4307         struct od_kobj *container, *container_next;
4308         struct od_attribute *attribute, *attribute_next;
4309
4310         if (list_empty(&adev->pm.od_kobj_list))
4311                 return;
4312
4313         list_for_each_entry_safe(container, container_next,
4314                                  &adev->pm.od_kobj_list, entry) {
4315                 list_del(&container->entry);
4316
4317                 list_for_each_entry_safe(attribute, attribute_next,
4318                                          &container->attribute, entry) {
4319                         list_del(&attribute->entry);
4320                         sysfs_remove_file(&container->kobj,
4321                                           &attribute->attribute.attr);
4322                         kfree(attribute);
4323                 }
4324
4325                 kobject_put(&container->kobj);
4326         }
4327 }
4328
4329 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4330                                            struct od_feature_ops *feature_ops)
4331 {
4332         umode_t mode;
4333
4334         if (!feature_ops->is_visible)
4335                 return false;
4336
4337         /*
4338          * If the feature has neither a user read nor a user write mode
4339          * set, we can assume the feature is actually not supported, and
4340          * the relevant sysfs interface should not be exposed.
4341          */
4342         mode = feature_ops->is_visible(adev);
4343         if (mode & (S_IRUSR | S_IWUSR))
4344                 return true;
4345
4346         return false;
4347 }
4348
4349 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4350                                         struct od_feature_container *container)
4351 {
4352         int i;
4353
4354         /*
4355          * If there is no valid entry within the container, the container
4356          * is recognized as self contained. A valid entry here means
4357          * one that has a valid name and is visible/supported by
4358          * the ASIC.
4359          */
4360         for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4361                 if (container->sub_feature[i].name &&
4362                     amdgpu_is_od_feature_supported(adev,
4363                         &container->sub_feature[i].ops))
4364                         return false;
4365         }
4366
4367         return true;
4368 }
4369
4370 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4371 {
4372         struct od_kobj *top_set, *sub_set;
4373         struct od_attribute *attribute;
4374         struct od_feature_container *container;
4375         struct od_feature_item *feature;
4376         int i, j;
4377         int ret;
4378
4379         /* Set up the top `gpu_od` directory which holds all other OD interfaces */
4380         top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4381         if (!top_set)
4382                 return -ENOMEM;
4383         list_add(&top_set->entry, &adev->pm.od_kobj_list);
4384
4385         ret = kobject_init_and_add(&top_set->kobj,
4386                                    &od_ktype,
4387                                    &adev->dev->kobj,
4388                                    "%s",
4389                                    "gpu_od");
4390         if (ret)
4391                 goto err_out;
4392         INIT_LIST_HEAD(&top_set->attribute);
4393         top_set->priv = adev;
4394
4395         for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4396                 container = &amdgpu_od_set.containers[i];
4397
4398                 if (!container->name)
4399                         continue;
4400
4401                 /*
4402                  * If there are valid entries within the container, the container
4403                  * is presented as a sub directory and all the entries it holds
4404                  * are presented as plain files under it.
4405                  * If there is no valid entry within the container, the container
4406                  * itself is presented as a plain file under the top `gpu_od` directory.
4407                  */
4408                 if (amdgpu_od_is_self_contained(adev, container)) {
4409                         if (!amdgpu_is_od_feature_supported(adev,
4410                              &container->ops))
4411                                 continue;
4412
4413                         /*
4414                          * The container is presented as a plain file under top `gpu_od`
4415                          * directory.
4416                          */
4417                         attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4418                         if (!attribute) {
4419                                 ret = -ENOMEM;
4420                                 goto err_out;
4421                         }
4422                         list_add(&attribute->entry, &top_set->attribute);
4423
4424                         attribute->attribute.attr.mode =
4425                                         container->ops.is_visible(adev);
4426                         attribute->attribute.attr.name = container->name;
4427                         attribute->attribute.show =
4428                                         container->ops.show;
4429                         attribute->attribute.store =
4430                                         container->ops.store;
4431                         ret = sysfs_create_file(&top_set->kobj,
4432                                                 &attribute->attribute.attr);
4433                         if (ret)
4434                                 goto err_out;
4435                 } else {
4436                         /* The container is presented as a sub directory. */
4437                         sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4438                         if (!sub_set) {
4439                                 ret = -ENOMEM;
4440                                 goto err_out;
4441                         }
4442                         list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4443
4444                         ret = kobject_init_and_add(&sub_set->kobj,
4445                                                    &od_ktype,
4446                                                    &top_set->kobj,
4447                                                    "%s",
4448                                                    container->name);
4449                         if (ret)
4450                                 goto err_out;
4451                         INIT_LIST_HEAD(&sub_set->attribute);
4452                         sub_set->priv = adev;
4453
4454                         for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4455                                 feature = &container->sub_feature[j];
4456                                 if (!feature->name)
4457                                         continue;
4458
4459                                 if (!amdgpu_is_od_feature_supported(adev,
4460                                      &feature->ops))
4461                                         continue;
4462
4463                                 /*
4464                                  * With the container presented as a sub directory, the entry within
4465                                  * it is presented as a plain file under the sub directory.
4466                                  */
4467                                 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4468                                 if (!attribute) {
4469                                         ret = -ENOMEM;
4470                                         goto err_out;
4471                                 }
4472                                 list_add(&attribute->entry, &sub_set->attribute);
4473
4474                                 attribute->attribute.attr.mode =
4475                                                 feature->ops.is_visible(adev);
4476                                 attribute->attribute.attr.name = feature->name;
4477                                 attribute->attribute.show =
4478                                                 feature->ops.show;
4479                                 attribute->attribute.store =
4480                                                 feature->ops.store;
4481                                 ret = sysfs_create_file(&sub_set->kobj,
4482                                                         &attribute->attribute.attr);
4483                                 if (ret)
4484                                         goto err_out;
4485                         }
4486                 }
4487         }
4488
4489         /*
4490          * If gpu_od is the only member in the list, that means gpu_od is an
4491          * empty directory, so remove it.
4492          */
4493         if (list_is_singular(&adev->pm.od_kobj_list))
4494                 goto err_out;
4495
4496         return 0;
4497
4498 err_out:
4499         amdgpu_od_set_fini(adev);
4500
4501         return ret;
4502 }
4503
4504 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4505 {
4506         enum amdgpu_sriov_vf_mode mode;
4507         uint32_t mask = 0;
4508         int ret;
4509
4510         if (adev->pm.sysfs_initialized)
4511                 return 0;
4512
4513         INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4514
4515         if (adev->pm.dpm_enabled == 0)
4516                 return 0;
4517
4518         mode = amdgpu_virt_get_sriov_vf_mode(adev);
4519
4520         /* under multi-vf mode, none of the hwmon attributes are supported */
4521         if (mode != SRIOV_VF_MODE_MULTI_VF) {
4522                 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4523                                                                         DRIVER_NAME, adev,
4524                                                                         hwmon_groups);
4525                 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4526                         ret = PTR_ERR(adev->pm.int_hwmon_dev);
4527                         dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4528                         return ret;
4529                 }
4530         }
4531
4532         switch (mode) {
4533         case SRIOV_VF_MODE_ONE_VF:
4534                 mask = ATTR_FLAG_ONEVF;
4535                 break;
4536         case SRIOV_VF_MODE_MULTI_VF:
4537                 mask = 0;
4538                 break;
4539         case SRIOV_VF_MODE_BARE_METAL:
4540         default:
4541                 mask = ATTR_FLAG_MASK_ALL;
4542                 break;
4543         }
4544
4545         ret = amdgpu_device_attr_create_groups(adev,
4546                                                amdgpu_device_attrs,
4547                                                ARRAY_SIZE(amdgpu_device_attrs),
4548                                                mask,
4549                                                &adev->pm.pm_attr_list);
4550         if (ret)
4551                 goto err_out0;
4552
4553         if (amdgpu_dpm_is_overdrive_supported(adev)) {
4554                 ret = amdgpu_od_set_init(adev);
4555                 if (ret)
4556                         goto err_out1;
4557         } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
4558                 dev_info(adev->dev, "overdrive feature is not supported\n");
4559         }
4560
4561         if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
4562             -EOPNOTSUPP) {
4563                 ret = devm_device_add_group(adev->dev,
4564                                             &amdgpu_pm_policy_attr_group);
4565                 if (ret)
4566                         goto err_out0;
4567         }
4568
4569         adev->pm.sysfs_initialized = true;
4570
4571         return 0;
4572
4573 err_out1:
4574         amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4575 err_out0:
4576         if (adev->pm.int_hwmon_dev)
4577                 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4578
4579         return ret;
4580 }
4581
4582 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4583 {
4584         amdgpu_od_set_fini(adev);
4585
4586         if (adev->pm.int_hwmon_dev)
4587                 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4588
4589         amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4590 }
4591
4592 /*
4593  * Debugfs info
4594  */
4595 #if defined(CONFIG_DEBUG_FS)
4596
4597 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4598                                            struct amdgpu_device *adev)
4599 {
4600         uint16_t *p_val;
4601         uint32_t size;
4602         int i;
4603         uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4604
4605         if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4606                 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), GFP_KERNEL);
4607                 size = num_cpu_cores * sizeof(uint16_t);
4608                 if (p_val &&
4609                     !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4610                                             (void *)p_val, &size)) {
4611                         for (i = 0; i < num_cpu_cores; i++)
4612                                 seq_printf(m, "\t%u MHz (CPU%d)\n",
4613                                            *(p_val + i), i);
4614                 }
4615
4616                 kfree(p_val);
4617         }
4618 }
4619
4620 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4621 {
4622         uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4623         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4624         uint32_t value;
4625         uint64_t value64 = 0;
4626         uint32_t query = 0;
4627         int size;
4628
4629         /* GPU Clocks */
4630         size = sizeof(value);
4631         seq_printf(m, "GFX Clocks and Power:\n");
4632
4633         amdgpu_debugfs_prints_cpu_info(m, adev);
4634
4635         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4636                 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4637         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4638                 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4639         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4640                 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4641         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4642                 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4643         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4644                 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4645         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4646                 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4647         size = sizeof(uint32_t);
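        /*
         * The power sensors below appear to report watts in 8.8 fixed point
         * (an assumption based on the decoding): integer watts in the upper
         * bits and a 1/256 W fractional byte, printed as a two-digit fraction.
         */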
4648         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4649                 if (adev->flags & AMD_IS_APU)
4650                         seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4651                 else
4652                         seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4653         }
4654         size = sizeof(uint32_t);
4655         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4656                 if (adev->flags & AMD_IS_APU)
4657                         seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4658                 else
4659                         seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4660         }
4661         size = sizeof(value);
4662         seq_printf(m, "\n");
4663
4664         /* GPU Temp */
4665         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4666                 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4667
4668         /* GPU Load */
4669         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4670                 seq_printf(m, "GPU Load: %u %%\n", value);
4671         /* MEM Load */
4672         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4673                 seq_printf(m, "MEM Load: %u %%\n", value);
4674         /* VCN Load */
4675         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4676                 seq_printf(m, "VCN Load: %u %%\n", value);
4677
4678         seq_printf(m, "\n");
4679
4680         /* SMC feature mask */
4681         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4682                 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4683
4684         /* ASICs newer than CHIP_VEGA20 support these sensors */
4685         if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4686                 /* VCN clocks */
4687                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4688                         if (!value) {
4689                                 seq_printf(m, "VCN: Powered down\n");
4690                         } else {
4691                                 seq_printf(m, "VCN: Powered up\n");
4692                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4693                                         seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4694                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4695                                         seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4696                         }
4697                 }
4698                 seq_printf(m, "\n");
4699         } else {
4700                 /* UVD clocks */
4701                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4702                         if (!value) {
4703                                 seq_printf(m, "UVD: Powered down\n");
4704                         } else {
4705                                 seq_printf(m, "UVD: Powered up\n");
4706                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4707                                         seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4708                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4709                                         seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4710                         }
4711                 }
4712                 seq_printf(m, "\n");
4713
4714                 /* VCE clocks */
4715                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4716                         if (!value) {
4717                                 seq_printf(m, "VCE: Powered down\n");
4718                         } else {
4719                                 seq_printf(m, "VCE: Powered up\n");
4720                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4721                                         seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4722                         }
4723                 }
4724         }
4725
4726         return 0;
4727 }
4728
4729 static const struct cg_flag_name clocks[] = {
4730         {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
4731         {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
4732         {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
4733         {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
4734         {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
4735         {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
4736         {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
4737         {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
4738         {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
4739         {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
4740         {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
4741         {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
4742         {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
4743         {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
4744         {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
4745         {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
4746         {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
4747         {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
4748         {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
4749         {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
4750         {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
4751         {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
4752         {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
4753         {AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
4754         {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
4755         {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
4756         {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
4757         {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
4758         {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
4759         {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
4760         {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
4761         {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
4762         {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
4763         {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
4764         {0, NULL},
4765 };
4766
4767 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
4768 {
4769         int i;
4770
4771         for (i = 0; clocks[i].flag; i++)
4772                 seq_printf(m, "\t%s: %s\n", clocks[i].name,
4773                            (flags & clocks[i].flag) ? "On" : "Off");
4774 }
4775
4776 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
4777 {
4778         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
4779         struct drm_device *dev = adev_to_drm(adev);
4780         u64 flags = 0;
4781         int r;
4782
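        /* Hardware access is not safe during reset or system-wide suspend;
         * a runtime-suspended GPU is instead woken up below.
         */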
4783         if (amdgpu_in_reset(adev))
4784                 return -EPERM;
4785         if (adev->in_suspend && !adev->in_runpm)
4786                 return -EPERM;
4787
4788         r = pm_runtime_resume_and_get(dev->dev);
4789         if (r < 0)
4790                 return r;
4791
4792         if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
4793                 r = amdgpu_debugfs_pm_info_pp(m, adev);
4794                 if (r)
4795                         goto out;
4796         }
4797
4798         amdgpu_device_ip_get_clockgating_state(adev, &flags);
4799
4800         seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
4801         amdgpu_parse_cg_state(m, flags);
4802         seq_printf(m, "\n");
4803
4804 out:
4805         pm_runtime_put_autosuspend(dev->dev);
4806
4807         return r;
4808 }
4809
4810 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
4811
4812 /*
4813  * amdgpu_pm_prv_buffer_read - Read the memory region allocated to FW
4814  *
4815  * Reads the debug memory region allocated to the PMFW
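 *
 * A hypothetical example (the DRM minor index in the debugfs path is an
 * assumption):
 *
 *   # xxd /sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer | head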
4816  */
4817 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
4818                                          size_t size, loff_t *pos)
4819 {
4820         struct amdgpu_device *adev = file_inode(f)->i_private;
4821         size_t smu_prv_buf_size;
4822         void *smu_prv_buf;
4823         int ret = 0;
4824
4825         if (amdgpu_in_reset(adev))
4826                 return -EPERM;
4827         if (adev->in_suspend && !adev->in_runpm)
4828                 return -EPERM;
4829
4830         ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
4831         if (ret)
4832                 return ret;
4833
4834         if (!smu_prv_buf || !smu_prv_buf_size)
4835                 return -EINVAL;
4836
4837         return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
4838                                        smu_prv_buf_size);
4839 }
4840
4841 static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
4842         .owner = THIS_MODULE,
4843         .open = simple_open,
4844         .read = amdgpu_pm_prv_buffer_read,
4845         .llseek = default_llseek,
4846 };
4847
4848 #endif
4849
4850 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
4851 {
4852 #if defined(CONFIG_DEBUG_FS)
4853         struct drm_minor *minor = adev_to_drm(adev)->primary;
4854         struct dentry *root = minor->debugfs_root;
4855
4856         if (!adev->pm.dpm_enabled)
4857                 return;
4858
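        /* Exposed as /sys/kernel/debug/dri/<minor>/amdgpu_pm_info; see
         * amdgpu_debugfs_pm_info_show() above.
         */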
4859         debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
4860                             &amdgpu_debugfs_pm_info_fops);
4861
4862         if (adev->pm.smu_prv_buffer_size > 0)
4863                 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
4864                                          adev,
4865                                          &amdgpu_debugfs_pm_prv_buffer_fops,
4866                                          adev->pm.smu_prv_buffer_size);
4867
4868         amdgpu_dpm_stb_debug_fs_init(adev);
4869 #endif
4870 }