/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <[email protected]>
 *          Alex Deucher <[email protected]>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

#define MAX_NUM_OF_FEATURES_PER_SUBSET          8
#define MAX_NUM_OF_SUBSETS                      8

struct od_attribute {
        struct kobj_attribute   attribute;
        struct list_head        entry;
};

struct od_kobj {
        struct kobject          kobj;
        struct list_head        entry;
        struct list_head        attribute;
        void                    *priv;
};

struct od_feature_ops {
        umode_t (*is_visible)(struct amdgpu_device *adev);
        ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count);
};

struct od_feature_item {
        const char              *name;
        struct od_feature_ops   ops;
};

struct od_feature_container {
        char                            *name;
        struct od_feature_ops           ops;
        struct od_feature_item          sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

struct od_feature_set {
        struct od_feature_container     containers[MAX_NUM_OF_SUBSETS];
};

static const struct hwmon_temp_label {
        enum PP_HWMON_TEMP channel;
        const char *label;
} temp_label[] = {
        {PP_TEMP_EDGE, "edge"},
        {PP_TEMP_JUNCTION, "junction"},
        {PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
        "BOOTUP_DEFAULT",
        "3D_FULL_SCREEN",
        "POWER_SAVING",
        "VIDEO",
        "VR",
        "COMPUTE",
        "CUSTOM",
        "WINDOW_3D",
        "CAPPED",
        "UNCAPPED",
};
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters.  The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
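 * Example usage, a minimal sketch (assumes the GPU is card0; the card
 * index may differ on your system):
 *
 * .. code-block:: bash
 *
 *      # query the current state
 *      cat /sys/class/drm/card0/device/power_dpm_state
 *      # request the battery state
 *      echo battery > /sys/class/drm/card0/device/power_dpm_state
 *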
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type pm;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        amdgpu_dpm_get_current_power_state(adev, &pm);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return sysfs_emit(buf, "%s\n",
                          (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                          (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type  state;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
                state = POWER_STATE_TYPE_BALANCED;
        else if (strncmp("performance", buf, strlen("performance")) == 0)
                state = POWER_STATE_TYPE_PERFORMANCE;
        else
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        amdgpu_dpm_set_power_state(adev, state);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters.  The file power_dpm_force_performance_level is
 * used for this.  It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When one of the profiling modes is selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating or clock fluctuations to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic.  profile_min_sclk forces the sclk
 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
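 * Example usage (assumes the GPU is card0; the index may differ per
 * system):
 *
 * .. code-block:: bash
 *
 *      # switch to manual control before using the pp_dpm_* files
 *      echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *      # pin the clocks to their peak levels for profiling
 *      echo profile_peak > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *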
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
                                                            struct device_attribute *attr,
                                                            char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_dpm_forced_level level = 0xff;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        level = amdgpu_dpm_get_performance_level(adev);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return sysfs_emit(buf, "%s\n",
                          (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                          (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
                          (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
                          (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
                          (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
                          "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                                                            struct device_attribute *attr,
                                                            const char *buf,
                                                            size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_dpm_forced_level level;
        int ret = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_HIGH;
        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_AUTO;
        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_MANUAL;
        } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
        } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
        } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
        } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
        } else {
                return -EINVAL;
        }

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        mutex_lock(&adev->pm.stable_pstate_ctx_lock);
        if (amdgpu_dpm_force_performance_level(adev, level)) {
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
                return -EINVAL;
        }
        /* override whatever a user ctx may have set */
        adev->pm.stable_pstate_ctx = NULL;
        mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct pp_states_info data;
        uint32_t i;
        int buf_len, ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        if (amdgpu_dpm_get_pp_num_states(adev, &data))
                memset(&data, 0, sizeof(data));

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
        for (i = 0; i < data.nums; i++)
                buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
                                (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
                                (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
                                (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
                                (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

        return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct pp_states_info data = {0};
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        amdgpu_dpm_get_current_power_state(adev, &pm);

        ret = amdgpu_dpm_get_pp_num_states(adev, &data);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return ret;

        for (i = 0; i < data.nums; i++) {
                if (pm == data.states[i])
                        break;
        }

        if (i == data.nums)
                i = -EINVAL;

        return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        if (adev->pm.pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
                return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type state = 0;
        struct pp_states_info data;
        unsigned long idx;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        adev->pm.pp_force_state_enabled = false;

        if (strlen(buf) == 1)
                return count;

        ret = kstrtoul(buf, 0, &idx);
        if (ret || idx >= ARRAY_SIZE(data.states))
                return -EINVAL;

        idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_get_pp_num_states(adev, &data);
        if (ret)
                goto err_out;

        state = data.states[idx];

        /* only set user selected power states */
        if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
            state != POWER_STATE_TYPE_DEFAULT) {
                ret = amdgpu_dpm_dispatch_task(adev,
                                AMD_PP_TASK_ENABLE_USER_STATE, &state);
                if (ret)
                        goto err_out;

                adev->pm.pp_force_state_enabled = true;
        }

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;

err_out:
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
        return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current powerplay table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
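 * For example, one way to save and restore a table from userspace
 * (paths are illustrative; assumes the GPU is card0):
 *
 * .. code-block:: bash
 *
 *      # dump the active table
 *      cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *      # upload a (possibly edited) table
 *      cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
 *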
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        char *table = NULL;
        int size, ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        size = amdgpu_dpm_get_pp_table(adev, &table);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (size <= 0)
                return size;

        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;

        memcpy(buf, table, size);

        return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_set_pp_table(adev, buf, count);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return ret;

        return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The file pp_od_clk_voltage is
 * used for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data_rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data_rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
 *   voltage calculation can be illustrated by "voltage = voltage
 *   calculated from v/f curve + overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, voltage curve points
 *   or voltage offset labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz
 *   and "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 *   For the sclk voltage curve supported by Vega20 and NV1X, enter the new
 *   values by writing a string that contains "vc point clock voltage"
 *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
 *   600" will update the first point with the clock set to 300 MHz and the
 *   voltage to 600 mV. "vc 2 1000 1000" will update the third point with
 *   the clock set to 1000 MHz and the voltage to 1000 mV.
 *
 *   For the voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
 *   string that contains "vo offset". E.g., "vo -10" will update the extra
 *   voltage offset applied to the whole v/f curve line to -10 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes, as shown in the example below
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
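 * A minimal end-to-end sketch on a pre-Vega20 ASIC (values are
 * illustrative and must fall within the advertised OD_RANGE; run from
 * the GPU's sysfs device directory):
 *
 * .. code-block:: bash
 *
 *      echo manual > power_dpm_force_performance_level
 *      echo "s 1 500 820" > pp_od_clk_voltage   # sclk level 1: 500 MHz @ 820 mV
 *      echo "m 0 350 810" > pp_od_clk_voltage   # mclk level 0: 350 MHz @ 810 mV
 *      echo "c" > pp_od_clk_voltage             # commit the edits
 *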
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        uint32_t parameter_size = 0;
        long parameter[64];
        char buf_cpy[128];
        char *tmp_str;
        char *sub_str;
        const char delimiter[3] = {' ', '\n', '\0'};
        uint32_t type;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        if (count > 127)
                return -EINVAL;

        if (*buf == 's')
                type = PP_OD_EDIT_SCLK_VDDC_TABLE;
        else if (*buf == 'p')
                type = PP_OD_EDIT_CCLK_VDDC_TABLE;
        else if (*buf == 'm')
                type = PP_OD_EDIT_MCLK_VDDC_TABLE;
        else if (*buf == 'r')
                type = PP_OD_RESTORE_DEFAULT_TABLE;
        else if (*buf == 'c')
                type = PP_OD_COMMIT_DPM_TABLE;
        else if (!strncmp(buf, "vc", 2))
                type = PP_OD_EDIT_VDDC_CURVE;
        else if (!strncmp(buf, "vo", 2))
                type = PP_OD_EDIT_VDDGFX_OFFSET;
        else
                return -EINVAL;

        memcpy(buf_cpy, buf, count+1);

        tmp_str = buf_cpy;

        if ((type == PP_OD_EDIT_VDDC_CURVE) ||
             (type == PP_OD_EDIT_VDDGFX_OFFSET))
                tmp_str++;
        while (isspace(*++tmp_str));

        while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                if (strlen(sub_str) == 0)
                        continue;
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
                parameter_size++;

                while (isspace(*tmp_str))
                        tmp_str++;
        }

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
                                              type,
                                              parameter,
                                              parameter_size))
                goto err_out;

        if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                          parameter, parameter_size))
                goto err_out;

        if (type == PP_OD_COMMIT_DPM_TABLE) {
                if (amdgpu_dpm_dispatch_task(adev,
                                             AMD_PP_TASK_READJUST_POWER_STATE,
                                             NULL))
                        goto err_out;
        }

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;

err_out:
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
        return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int size = 0;
        int ret;
        enum pp_clock_type od_clocks[6] = {
                OD_SCLK,
                OD_MCLK,
                OD_VDDC_CURVE,
                OD_RANGE,
                OD_VDDGFX_OFFSET,
                OD_CCLK,
        };
        uint clk_index;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
                ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
                if (ret)
                        break;
        }
        if (ret == -ENOENT) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
        }

        if (size == 0)
                size = sysfs_emit(buf, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - the current ppfeature mask
 * - a list of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature mask and input the
 * new ppfeature mask.
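 *
 * For example (the mask value below is illustrative only; always derive
 * it from the masks reported by your own GPU):
 *
 * .. code-block:: bash
 *
 *      cat pp_features                        # note the current mask
 *      echo 0x0000000000003fff > pp_features  # write the adjusted mask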
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t featuremask;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        ssize_t size;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        size = amdgpu_dpm_get_ppfeature_status(adev, buf);
        if (size <= 0)
                size = sysfs_emit(buf, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * The pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * The pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of the level indices to the file (pp_dpm_sclk/mclk/pcie).
 * E.g.,
 *
 * .. code-block:: bash
 *
 *      echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
                enum pp_clock_type type,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int size = 0;
        int ret = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
        if (ret == -ENOENT)
                size = amdgpu_dpm_print_clock_levels(adev, type, buf);

        if (size == 0)
                size = sysfs_emit(buf, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX     (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
        int ret;
        unsigned long level;
        char *sub_str = NULL;
        char *tmp;
        char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
        const char delimiter[3] = {' ', '\n', '\0'};
        size_t bytes;

        *mask = 0;

        bytes = min(count, sizeof(buf_cpy) - 1);
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
        while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
                if (strlen(sub_str)) {
                        ret = kstrtoul(sub_str, 0, &level);
                        if (ret || level > 31)
                                return -EINVAL;
                        *mask |= 1 << level;
                } else
                        break;
        }

        return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
                enum pp_clock_type type,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        uint32_t mask = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_force_clock_level(adev, type, mask);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t value = 0;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        value = amdgpu_dpm_get_sclk_od(adev);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        long int value;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = kstrtol(buf, 0, &value);

        if (ret)
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t value = 0;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        value = amdgpu_dpm_get_mclk_od(adev);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        long int value;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = kstrtol(buf, 0, &value);

        if (ret)
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state.  The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level.  Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics.  To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter.  Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
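 * For example (profile numbers and parameter counts vary by ASIC, so
 * always check the output of a read first; run from the GPU's sysfs
 * device directory):
 *
 * .. code-block:: bash
 *
 *      cat pp_power_profile_mode        # list profiles and heuristics
 *      echo 1 > pp_power_profile_mode   # select predefined profile 1
 *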
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        ssize_t size;
        int ret;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        size = amdgpu_dpm_get_power_profile_mode(adev, buf);
        if (size <= 0)
                size = sysfs_emit(buf, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        int ret;
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t parameter_size = 0;
        long parameter[64];
        char *sub_str, buf_cpy[128];
        char *tmp_str;
        uint32_t i = 0;
        char tmp[2];
        long int profile_mode = 0;
        const char delimiter[3] = {' ', '\n', '\0'};

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        tmp[0] = *(buf);
        tmp[1] = '\0';
        ret = kstrtol(tmp, 0, &profile_mode);
        if (ret)
                return -EINVAL;

        if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
                if (count < 2 || count > 127)
                        return -EINVAL;
                while (isspace(*++buf))
                        i++;
                memcpy(buf_cpy, buf, count-i);
                tmp_str = buf_cpy;
                while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                        if (strlen(sub_str) == 0)
                                continue;
                        ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                        if (ret)
                                return -EINVAL;
                        parameter_size++;
                        while (isspace(*tmp_str))
                                tmp_str++;
                }
        }
        parameter[parameter_size] = profile_mode;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
                return ret;
        }

        ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (!ret)
                return count;

        return -EINVAL;
}

static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
                                           enum amd_pp_sensors sensor,
                                           void *query)
{
        int r, size = sizeof(uint32_t);

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }

        /* get the sensor value */
        r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);

        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

        return r;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage.  The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
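 *
 * For example (assumes the GPU is card0):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/gpu_busy_percent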
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        unsigned int value;
        int r;

        r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
        if (r)
                return r;

        return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage.  The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        unsigned int value;
        int r;

        r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
        if (r)
                return r;

        return sysfs_emit(buf, "%d\n", value);
}

1571 /**
1572  * DOC: pcie_bw
1573  *
1574  * The amdgpu driver provides a sysfs API for estimating how much data
1575  * has been received and sent by the GPU in the last second through PCIe.
1576  * The file pcie_bw is used for this.
1577  * The Perf counters count the number of received and sent messages and return
1578  * those values, as well as the maximum payload size of a PCIe packet (mps).
1579  * Note that it is not possible to easily and quickly obtain the size of each
1580  * packet transmitted, so we output the max payload size (mps) to allow for
1581  * quick estimation of the PCIe bandwidth usage.
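 *
 * Example (illustrative values): the file emits "count0 count1 mps", so a
 * rough upper bound on the bandwidth used over the last second is
 * (count0 + count1) * mps bytes:
 *
 * $ cat /sys/class/drm/card0/device/pcie_bw
 * 4096 2048 256
 *
 * which here estimates (4096 + 2048) * 256 = 1572864 bytes/s.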
1582  */
1583 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1584                 struct device_attribute *attr,
1585                 char *buf)
1586 {
1587         struct drm_device *ddev = dev_get_drvdata(dev);
1588         struct amdgpu_device *adev = drm_to_adev(ddev);
1589         uint64_t count0 = 0, count1 = 0;
1590         int ret;
1591
1592         if (amdgpu_in_reset(adev))
1593                 return -EPERM;
1594         if (adev->in_suspend && !adev->in_runpm)
1595                 return -EPERM;
1596
1597         if (adev->flags & AMD_IS_APU)
1598                 return -ENODATA;
1599
1600         if (!adev->asic_funcs->get_pcie_usage)
1601                 return -ENODATA;
1602
1603         ret = pm_runtime_get_sync(ddev->dev);
1604         if (ret < 0) {
1605                 pm_runtime_put_autosuspend(ddev->dev);
1606                 return ret;
1607         }
1608
1609         amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1610
1611         pm_runtime_mark_last_busy(ddev->dev);
1612         pm_runtime_put_autosuspend(ddev->dev);
1613
1614         return sysfs_emit(buf, "%llu %llu %i\n",
1615                           count0, count1, pcie_get_mps(adev->pdev));
1616 }
1617
1618 /**
1619  * DOC: unique_id
1620  *
1621  * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1622  * The file unique_id is used for this.
1623  * The ID is unique per GPU and persists from machine to machine.
1624  *
1625  * NOTE: This will only work for GFX9 and newer. The file will be absent
1626  * on unsupported ASICs (GFX8 and older).
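 *
 * Example (illustrative value; the ID is printed as 16 hex digits):
 *
 * $ cat /sys/class/drm/card0/device/unique_id
 * 0123456789abcdef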
1627  */
1628 static ssize_t amdgpu_get_unique_id(struct device *dev,
1629                 struct device_attribute *attr,
1630                 char *buf)
1631 {
1632         struct drm_device *ddev = dev_get_drvdata(dev);
1633         struct amdgpu_device *adev = drm_to_adev(ddev);
1634
1635         if (amdgpu_in_reset(adev))
1636                 return -EPERM;
1637         if (adev->in_suspend && !adev->in_runpm)
1638                 return -EPERM;
1639
1640         if (adev->unique_id)
1641                 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1642
1643         return 0;
1644 }
1645
1646 /**
1647  * DOC: thermal_throttling_logging
1648  *
1649  * Thermal throttling pulls down the clock frequency and thus the performance.
1650  * It's a useful mechanism to protect the chip from overheating. Since it
1651  * impacts performance, the user controls whether it is enabled and, if so,
1652  * the logging frequency.
1653  *
1654  * Reading back the file shows you the status (enabled or disabled) and
1655  * the interval (in seconds) between each thermal logging event.
1656  *
1657  * Writing an integer to the file sets a new logging interval, in seconds.
1658  * The value should be between 1 and 3600. If the value is less than 1,
1659  * thermal logging is disabled; values greater than 3600 are rejected.
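 *
 * Example (illustrative; the device identifier in the output varies):
 *
 * $ echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 * $ cat /sys/class/drm/card0/device/thermal_throttling_logging
 * 0000:03:00.0: thermal throttling logging enabled, with interval 60 seconds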
1660  */
1661 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1662                                                      struct device_attribute *attr,
1663                                                      char *buf)
1664 {
1665         struct drm_device *ddev = dev_get_drvdata(dev);
1666         struct amdgpu_device *adev = drm_to_adev(ddev);
1667
1668         return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1669                           adev_to_drm(adev)->unique,
1670                           atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1671                           adev->throttling_logging_rs.interval / HZ + 1);
1672 }
1673
1674 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1675                                                      struct device_attribute *attr,
1676                                                      const char *buf,
1677                                                      size_t count)
1678 {
1679         struct drm_device *ddev = dev_get_drvdata(dev);
1680         struct amdgpu_device *adev = drm_to_adev(ddev);
1681         long throttling_logging_interval;
1682         unsigned long flags;
1683         int ret = 0;
1684
1685         ret = kstrtol(buf, 0, &throttling_logging_interval);
1686         if (ret)
1687                 return ret;
1688
1689         if (throttling_logging_interval > 3600)
1690                 return -EINVAL;
1691
1692         if (throttling_logging_interval > 0) {
1693                 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1694                 /*
1695                  * Reset the ratelimit timer internals.
1696                  * This can effectively restart the timer.
1697                  */
1698                 adev->throttling_logging_rs.interval =
1699                         (throttling_logging_interval - 1) * HZ;
1700                 adev->throttling_logging_rs.begin = 0;
1701                 adev->throttling_logging_rs.printed = 0;
1702                 adev->throttling_logging_rs.missed = 0;
1703                 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1704
1705                 atomic_set(&adev->throttling_logging_enabled, 1);
1706         } else {
1707                 atomic_set(&adev->throttling_logging_enabled, 0);
1708         }
1709
1710         return count;
1711 }
1712
1713 /**
1714  * DOC: apu_thermal_cap
1715  *
1716  * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1717  * limit temperature in millidegrees Celsius.
1718  *
1719  * Reading back the file shows you the current core limit value.
1720  *
1721  * Writing an integer to the file sets a new thermal limit. The value
1722  * should be between 0 and 100; values outside that range are rejected
1723  * with -EINVAL.
1724  */
1725 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1726                                          struct device_attribute *attr,
1727                                          char *buf)
1728 {
1729         int ret, size;
1730         u32 limit;
1731         struct drm_device *ddev = dev_get_drvdata(dev);
1732         struct amdgpu_device *adev = drm_to_adev(ddev);
1733
1734         ret = pm_runtime_get_sync(ddev->dev);
1735         if (ret < 0) {
1736                 pm_runtime_put_autosuspend(ddev->dev);
1737                 return ret;
1738         }
1739
1740         ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1741         if (!ret)
1742                 size = sysfs_emit(buf, "%u\n", limit);
1743         else
1744                 size = sysfs_emit(buf, "failed to get thermal limit\n");
1745
1746         pm_runtime_mark_last_busy(ddev->dev);
1747         pm_runtime_put_autosuspend(ddev->dev);
1748
1749         return size;
1750 }
1751
1752 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1753                                          struct device_attribute *attr,
1754                                          const char *buf,
1755                                          size_t count)
1756 {
1757         int ret;
1758         u32 value;
1759         struct drm_device *ddev = dev_get_drvdata(dev);
1760         struct amdgpu_device *adev = drm_to_adev(ddev);
1761
1762         ret = kstrtou32(buf, 10, &value);
1763         if (ret)
1764                 return ret;
1765
1766         if (value > 100) {
1767                 dev_err(dev, "Invalid argument!\n");
1768                 return -EINVAL;
1769         }
1770
1771         ret = pm_runtime_get_sync(ddev->dev);
1772         if (ret < 0) {
1773                 pm_runtime_put_autosuspend(ddev->dev);
1774                 return ret;
1775         }
1776
1777         ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1778         if (ret)
1779                 dev_err(dev, "failed to update thermal limit\n");
1780
1781         /* Drop the runtime PM reference even when the update failed,
1782          * so the device reference count stays balanced. */
1783         pm_runtime_mark_last_busy(ddev->dev);
1784         pm_runtime_put_autosuspend(ddev->dev);
1785
1786         return ret ? ret : count;
1787 }
1788
1789 /**
1790  * DOC: gpu_metrics
1791  *
1792  * The amdgpu driver provides a sysfs API for retrieving current gpu
1793  * metrics data. The file gpu_metrics is used for this. Reading the
1794  * file will dump all the current gpu metrics data.
1795  *
1796  * The data includes temperature, frequency, engine utilization,
1797  * power consumption, throttler status, fan speed and CPU core statistics
1798  * (available on APUs only). That is, it gives a snapshot of all sensors
1799  * at the same time.
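 *
 * The file holds a packed, versioned binary structure rather than text, so
 * it is normally parsed by tools that know the metrics table layout. To
 * take a quick look at the raw bytes (illustrative):
 *
 * $ hexdump -C /sys/class/drm/card0/device/gpu_metrics | head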
1800  */
1801 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1802                                       struct device_attribute *attr,
1803                                       char *buf)
1804 {
1805         struct drm_device *ddev = dev_get_drvdata(dev);
1806         struct amdgpu_device *adev = drm_to_adev(ddev);
1807         void *gpu_metrics;
1808         ssize_t size = 0;
1809         int ret;
1810
1811         if (amdgpu_in_reset(adev))
1812                 return -EPERM;
1813         if (adev->in_suspend && !adev->in_runpm)
1814                 return -EPERM;
1815
1816         ret = pm_runtime_get_sync(ddev->dev);
1817         if (ret < 0) {
1818                 pm_runtime_put_autosuspend(ddev->dev);
1819                 return ret;
1820         }
1821
1822         size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1823         if (size <= 0)
1824                 goto out;
1825
1826         if (size >= PAGE_SIZE)
1827                 size = PAGE_SIZE - 1;
1828
1829         memcpy(buf, gpu_metrics, size);
1830
1831 out:
1832         pm_runtime_mark_last_busy(ddev->dev);
1833         pm_runtime_put_autosuspend(ddev->dev);
1834
1835         return size;
1836 }
1837
1838 static int amdgpu_show_powershift_percent(struct device *dev,
1839                                         char *buf, enum amd_pp_sensors sensor)
1840 {
1841         struct drm_device *ddev = dev_get_drvdata(dev);
1842         struct amdgpu_device *adev = drm_to_adev(ddev);
1843         uint32_t ss_power;
1844         int r = 0, i;
1845
1846         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1847         if (r == -EOPNOTSUPP) {
1848                 /* sensor not available on dGPU, try to read from APU */
1849                 adev = NULL;
1850                 mutex_lock(&mgpu_info.mutex);
1851                 for (i = 0; i < mgpu_info.num_gpu; i++) {
1852                         if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1853                                 adev = mgpu_info.gpu_ins[i].adev;
1854                                 break;
1855                         }
1856                 }
1857                 mutex_unlock(&mgpu_info.mutex);
1858                 if (adev)
1859                         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1860         }
1861
1862         if (r)
1863                 return r;
1864
1865         return sysfs_emit(buf, "%u%%\n", ss_power);
1866 }
1867
1868 /**
1869  * DOC: smartshift_apu_power
1870  *
1871  * The amdgpu driver provides a sysfs API for reporting the APU power
1872  * shift as a percentage if the platform supports SmartShift. A value of 0
1873  * means there is no power shift, and values in [1-100] mean that power
1874  * is shifted to the APU; the percentage of boost is relative to the APU
1875  * power limit on the platform.
1876  */
1877
1878 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1879                                                char *buf)
1880 {
1881         return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1882 }
1883
1884 /**
1885  * DOC: smartshift_dgpu_power
1886  *
1887  * The amdgpu driver provides a sysfs API for reporting the dGPU power
1888  * shift as a percentage if the platform supports SmartShift. A value of 0
1889  * means there is no power shift, and values in [1-100] mean that power
1890  * is shifted to the dGPU; the percentage of boost is relative to the dGPU
1891  * power limit on the platform.
1892  */
1893
1894 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1895                                                 char *buf)
1896 {
1897         return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1898 }
1899
1900 /**
1901  * DOC: smartshift_bias
1902  *
1903  * The amdgpu driver provides a sysfs API for reporting the
1904  * SmartShift (SS2.0) bias level. The value ranges from -100 to 100
1905  * and the default is 0. -100 sets maximum preference to the APU
1906  * and 100 sets maximum preference to the dGPU.
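 *
 * Example (illustrative): bias power allocation moderately towards the
 * APU; out-of-range writes are clamped to the documented [-100, 100] range:
 *
 * $ echo -30 > /sys/class/drm/card0/device/smartshift_bias
 * $ cat /sys/class/drm/card0/device/smartshift_bias
 * -30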
1907  */
1908
1909 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1910                                           struct device_attribute *attr,
1911                                           char *buf)
1912 {
1913         int r = 0;
1914
1915         r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1916
1917         return r;
1918 }
1919
1920 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1921                                           struct device_attribute *attr,
1922                                           const char *buf, size_t count)
1923 {
1924         struct drm_device *ddev = dev_get_drvdata(dev);
1925         struct amdgpu_device *adev = drm_to_adev(ddev);
1926         int r = 0;
1927         int bias = 0;
1928
1929         if (amdgpu_in_reset(adev))
1930                 return -EPERM;
1931         if (adev->in_suspend && !adev->in_runpm)
1932                 return -EPERM;
1933
1934         r = pm_runtime_get_sync(ddev->dev);
1935         if (r < 0) {
1936                 pm_runtime_put_autosuspend(ddev->dev);
1937                 return r;
1938         }
1939
1940         r = kstrtoint(buf, 10, &bias);
1941         if (r)
1942                 goto out;
1943
1944         if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1945                 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1946         else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1947                 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1948
1949         amdgpu_smartshift_bias = bias;
1950         r = count;
1951
1952         /* TODO: update bias level with SMU message */
1953
1954 out:
1955         pm_runtime_mark_last_busy(ddev->dev);
1956         pm_runtime_put_autosuspend(ddev->dev);
1957         return r;
1958 }
1959
1960 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1961                                 uint32_t mask, enum amdgpu_device_attr_states *states)
1962 {
1963         if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1964                 *states = ATTR_STATE_UNSUPPORTED;
1965
1966         return 0;
1967 }
1968
1969 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1970                                uint32_t mask, enum amdgpu_device_attr_states *states)
1971 {
1972         uint32_t ss_power;
1973
1974         if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1975                 *states = ATTR_STATE_UNSUPPORTED;
1976         else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1977                  (void *)&ss_power))
1978                 *states = ATTR_STATE_UNSUPPORTED;
1979         else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1980                  (void *)&ss_power))
1981                 *states = ATTR_STATE_UNSUPPORTED;
1982
1983         return 0;
1984 }
1985
1986 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
1987         AMDGPU_DEVICE_ATTR_RW(power_dpm_state,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1988         AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1989         AMDGPU_DEVICE_ATTR_RO(pp_num_states,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1990         AMDGPU_DEVICE_ATTR_RO(pp_cur_state,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1991         AMDGPU_DEVICE_ATTR_RW(pp_force_state,                           ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1992         AMDGPU_DEVICE_ATTR_RW(pp_table,                                 ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1993         AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1994         AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1995         AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1996         AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1997         AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1998         AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1999         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2000         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,                             ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2001         AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2002         AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2003         AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,                               ATTR_FLAG_BASIC),
2004         AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,                               ATTR_FLAG_BASIC),
2005         AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,                    ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2006         AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,                        ATTR_FLAG_BASIC),
2007         AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,                         ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2008         AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,                         ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2009         AMDGPU_DEVICE_ATTR_RO(pcie_bw,                                  ATTR_FLAG_BASIC),
2010         AMDGPU_DEVICE_ATTR_RW(pp_features,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2011         AMDGPU_DEVICE_ATTR_RO(unique_id,                                ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2012         AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,               ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2013         AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2014         AMDGPU_DEVICE_ATTR_RO(gpu_metrics,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2015         AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,                     ATTR_FLAG_BASIC,
2016                               .attr_update = ss_power_attr_update),
2017         AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,                    ATTR_FLAG_BASIC,
2018                               .attr_update = ss_power_attr_update),
2019         AMDGPU_DEVICE_ATTR_RW(smartshift_bias,                          ATTR_FLAG_BASIC,
2020                               .attr_update = ss_bias_attr_update),
2021 };
2022
2023 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2024                                uint32_t mask, enum amdgpu_device_attr_states *states)
2025 {
2026         struct device_attribute *dev_attr = &attr->dev_attr;
2027         uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
2028         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2029         const char *attr_name = dev_attr->attr.name;
2030
2031         if (!(attr->flags & mask)) {
2032                 *states = ATTR_STATE_UNSUPPORTED;
2033                 return 0;
2034         }
2035
2036 #define DEVICE_ATTR_IS(_name)   (!strcmp(attr_name, #_name))
2037
2038         if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2039                 if (gc_ver < IP_VERSION(9, 0, 0))
2040                         *states = ATTR_STATE_UNSUPPORTED;
2041         } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2042                 if (gc_ver < IP_VERSION(9, 0, 0) ||
2043                     !amdgpu_device_has_display_hardware(adev))
2044                         *states = ATTR_STATE_UNSUPPORTED;
2045         } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2046                 if (mp1_ver < IP_VERSION(10, 0, 0))
2047                         *states = ATTR_STATE_UNSUPPORTED;
2048         } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
2049                 *states = ATTR_STATE_UNSUPPORTED;
2050                 if (amdgpu_dpm_is_overdrive_supported(adev))
2051                         *states = ATTR_STATE_SUPPORTED;
2052         } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2053                 if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
2054                         *states = ATTR_STATE_UNSUPPORTED;
2055         } else if (DEVICE_ATTR_IS(pcie_bw)) {
2056                 /* PCIe Perf counters won't work on APU nodes */
2057                 if (adev->flags & AMD_IS_APU)
2058                         *states = ATTR_STATE_UNSUPPORTED;
2059         } else if (DEVICE_ATTR_IS(unique_id)) {
2060                 switch (gc_ver) {
2061                 case IP_VERSION(9, 0, 1):
2062                 case IP_VERSION(9, 4, 0):
2063                 case IP_VERSION(9, 4, 1):
2064                 case IP_VERSION(9, 4, 2):
2065                 case IP_VERSION(9, 4, 3):
2066                 case IP_VERSION(10, 3, 0):
2067                 case IP_VERSION(11, 0, 0):
2068                 case IP_VERSION(11, 0, 1):
2069                 case IP_VERSION(11, 0, 2):
2070                         *states = ATTR_STATE_SUPPORTED;
2071                         break;
2072                 default:
2073                         *states = ATTR_STATE_UNSUPPORTED;
2074                 }
2075         } else if (DEVICE_ATTR_IS(pp_features)) {
2076                 if ((adev->flags & AMD_IS_APU &&
2077                      gc_ver != IP_VERSION(9, 4, 3)) ||
2078                     gc_ver < IP_VERSION(9, 0, 0))
2079                         *states = ATTR_STATE_UNSUPPORTED;
2080         } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2081                 if (gc_ver < IP_VERSION(9, 1, 0))
2082                         *states = ATTR_STATE_UNSUPPORTED;
2083         } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2084                 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2085                       gc_ver == IP_VERSION(10, 3, 0) ||
2086                       gc_ver == IP_VERSION(10, 1, 2) ||
2087                       gc_ver == IP_VERSION(11, 0, 0) ||
2088                       gc_ver == IP_VERSION(11, 0, 2) ||
2089                       gc_ver == IP_VERSION(11, 0, 3) ||
2090                       gc_ver == IP_VERSION(9, 4, 3)))
2091                         *states = ATTR_STATE_UNSUPPORTED;
2092         } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2093                 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2094                            gc_ver == IP_VERSION(10, 3, 0) ||
2095                            gc_ver == IP_VERSION(11, 0, 2) ||
2096                            gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2097                         *states = ATTR_STATE_UNSUPPORTED;
2098         } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2099                 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2100                       gc_ver == IP_VERSION(10, 3, 0) ||
2101                       gc_ver == IP_VERSION(10, 1, 2) ||
2102                       gc_ver == IP_VERSION(11, 0, 0) ||
2103                       gc_ver == IP_VERSION(11, 0, 2) ||
2104                       gc_ver == IP_VERSION(11, 0, 3) ||
2105                       gc_ver == IP_VERSION(9, 4, 3)))
2106                         *states = ATTR_STATE_UNSUPPORTED;
2107         } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2108                 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2109                            gc_ver == IP_VERSION(10, 3, 0) ||
2110                            gc_ver == IP_VERSION(11, 0, 2) ||
2111                            gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2112                         *states = ATTR_STATE_UNSUPPORTED;
2113         } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2114                 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2115                         *states = ATTR_STATE_UNSUPPORTED;
2116                 else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
2117                         *states = ATTR_STATE_UNSUPPORTED;
2118         }
2119
2120         switch (gc_ver) {
2121         case IP_VERSION(9, 4, 1):
2122         case IP_VERSION(9, 4, 2):
2123                 /* the MI series cards do not support standalone mclk/socclk/fclk level setting */
2124                 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2125                     DEVICE_ATTR_IS(pp_dpm_socclk) ||
2126                     DEVICE_ATTR_IS(pp_dpm_fclk)) {
2127                         dev_attr->attr.mode &= ~S_IWUGO;
2128                         dev_attr->store = NULL;
2129                 }
2130                 break;
2131         case IP_VERSION(10, 3, 0):
2132                 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2133                     amdgpu_sriov_vf(adev)) {
2134                         dev_attr->attr.mode &= ~S_IWUGO;
2135                         dev_attr->store = NULL;
2136                 }
2137                 break;
2138         default:
2139                 break;
2140         }
2141
2142         if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2143                 /* SMU MP1 does not support dcefclk level setting */
2144                 if (gc_ver >= IP_VERSION(10, 0, 0)) {
2145                         dev_attr->attr.mode &= ~S_IWUGO;
2146                         dev_attr->store = NULL;
2147                 }
2148         }
2149
2150         /* setting should not be allowed from VF if not in one VF mode */
2151         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2152                 dev_attr->attr.mode &= ~S_IWUGO;
2153                 dev_attr->store = NULL;
2154         }
2155
2156 #undef DEVICE_ATTR_IS
2157
2158         return 0;
2159 }
2160
2162 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2163                                      struct amdgpu_device_attr *attr,
2164                                      uint32_t mask, struct list_head *attr_list)
2165 {
2166         int ret = 0;
2167         enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2168         struct amdgpu_device_attr_entry *attr_entry;
2169         struct device_attribute *dev_attr;
2170         const char *name;
2171
2172         int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2173                            uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2174
2175         if (!attr)
2176                 return -EINVAL;
2177
2178         dev_attr = &attr->dev_attr;
2179         name = dev_attr->attr.name;
2180
2181         attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2182
2183         ret = attr_update(adev, attr, mask, &attr_states);
2184         if (ret) {
2185                 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2186                         name, ret);
2187                 return ret;
2188         }
2189
2190         if (attr_states == ATTR_STATE_UNSUPPORTED)
2191                 return 0;
2192
2193         ret = device_create_file(adev->dev, dev_attr);
2194         if (ret) {
2195                 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2196                         name, ret);
2197                 /* don't track an attribute whose file was never created */
2198                 return ret;
2199         }
2198
2199         attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2200         if (!attr_entry)
2201                 return -ENOMEM;
2202
2203         attr_entry->attr = attr;
2204         INIT_LIST_HEAD(&attr_entry->entry);
2205
2206         list_add_tail(&attr_entry->entry, attr_list);
2207
2208         return ret;
2209 }
2210
2211 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2212 {
2213         struct device_attribute *dev_attr = &attr->dev_attr;
2214
2215         device_remove_file(adev->dev, dev_attr);
2216 }
2217
2218 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2219                                              struct list_head *attr_list);
2220
2221 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2222                                             struct amdgpu_device_attr *attrs,
2223                                             uint32_t counts,
2224                                             uint32_t mask,
2225                                             struct list_head *attr_list)
2226 {
2227         int ret = 0;
2228         uint32_t i = 0;
2229
2230         for (i = 0; i < counts; i++) {
2231                 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2232                 if (ret)
2233                         goto failed;
2234         }
2235
2236         return 0;
2237
2238 failed:
2239         amdgpu_device_attr_remove_groups(adev, attr_list);
2240
2241         return ret;
2242 }
2243
2244 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2245                                              struct list_head *attr_list)
2246 {
2247         struct amdgpu_device_attr_entry *entry, *entry_tmp;
2248
2249         if (list_empty(attr_list))
2250                 return;
2251
2252         list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2253                 amdgpu_device_attr_remove(adev, entry->attr);
2254                 list_del(&entry->entry);
2255                 kfree(entry);
2256         }
2257 }
2258
2259 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2260                                       struct device_attribute *attr,
2261                                       char *buf)
2262 {
2263         struct amdgpu_device *adev = dev_get_drvdata(dev);
2264         int channel = to_sensor_dev_attr(attr)->index;
2265         int r, temp = 0;
2266
2267         if (channel >= PP_TEMP_MAX)
2268                 return -EINVAL;
2269
2270         switch (channel) {
2271         case PP_TEMP_JUNCTION:
2272                 /* get current junction temperature */
2273                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2274                                            (void *)&temp);
2275                 break;
2276         case PP_TEMP_EDGE:
2277                 /* get current edge temperature */
2278                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2279                                            (void *)&temp);
2280                 break;
2281         case PP_TEMP_MEM:
2282                 /* get current memory temperature */
2283                 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2284                                            (void *)&temp);
2285                 break;
2286         default:
2287                 r = -EINVAL;
2288                 break;
2289         }
2290
2291         if (r)
2292                 return r;
2293
2294         return sysfs_emit(buf, "%d\n", temp);
2295 }
2296
2297 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2298                                              struct device_attribute *attr,
2299                                              char *buf)
2300 {
2301         struct amdgpu_device *adev = dev_get_drvdata(dev);
2302         int hyst = to_sensor_dev_attr(attr)->index;
2303         int temp;
2304
2305         if (hyst)
2306                 temp = adev->pm.dpm.thermal.min_temp;
2307         else
2308                 temp = adev->pm.dpm.thermal.max_temp;
2309
2310         return sysfs_emit(buf, "%d\n", temp);
2311 }
2312
2313 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2314                                              struct device_attribute *attr,
2315                                              char *buf)
2316 {
2317         struct amdgpu_device *adev = dev_get_drvdata(dev);
2318         int hyst = to_sensor_dev_attr(attr)->index;
2319         int temp;
2320
2321         if (hyst)
2322                 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2323         else
2324                 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2325
2326         return sysfs_emit(buf, "%d\n", temp);
2327 }
2328
2329 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2330                                              struct device_attribute *attr,
2331                                              char *buf)
2332 {
2333         struct amdgpu_device *adev = dev_get_drvdata(dev);
2334         int hyst = to_sensor_dev_attr(attr)->index;
2335         int temp;
2336
2337         if (hyst)
2338                 temp = adev->pm.dpm.thermal.min_mem_temp;
2339         else
2340                 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2341
2342         return sysfs_emit(buf, "%d\n", temp);
2343 }
2344
2345 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2346                                              struct device_attribute *attr,
2347                                              char *buf)
2348 {
2349         int channel = to_sensor_dev_attr(attr)->index;
2350
2351         if (channel >= PP_TEMP_MAX)
2352                 return -EINVAL;
2353
2354         return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2355 }
2356
2357 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2358                                              struct device_attribute *attr,
2359                                              char *buf)
2360 {
2361         struct amdgpu_device *adev = dev_get_drvdata(dev);
2362         int channel = to_sensor_dev_attr(attr)->index;
2363         int temp = 0;
2364
2365         if (channel >= PP_TEMP_MAX)
2366                 return -EINVAL;
2367
2368         switch (channel) {
2369         case PP_TEMP_JUNCTION:
2370                 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2371                 break;
2372         case PP_TEMP_EDGE:
2373                 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2374                 break;
2375         case PP_TEMP_MEM:
2376                 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2377                 break;
2378         }
2379
2380         return sysfs_emit(buf, "%d\n", temp);
2381 }
2382
2383 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2384                                             struct device_attribute *attr,
2385                                             char *buf)
2386 {
2387         struct amdgpu_device *adev = dev_get_drvdata(dev);
2388         u32 pwm_mode = 0;
2389         int ret;
2390
2391         if (amdgpu_in_reset(adev))
2392                 return -EPERM;
2393         if (adev->in_suspend && !adev->in_runpm)
2394                 return -EPERM;
2395
2396         ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2397         if (ret < 0) {
2398                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2399                 return ret;
2400         }
2401
2402         ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2403
2404         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2405         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2406
2407         if (ret)
2408                 return -EINVAL;
2409
2410         return sysfs_emit(buf, "%u\n", pwm_mode);
2411 }
2412
2413 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2414                                             struct device_attribute *attr,
2415                                             const char *buf,
2416                                             size_t count)
2417 {
2418         struct amdgpu_device *adev = dev_get_drvdata(dev);
2419         int err, ret;
2420         int value;
2421
2422         if (amdgpu_in_reset(adev))
2423                 return -EPERM;
2424         if (adev->in_suspend && !adev->in_runpm)
2425                 return -EPERM;
2426
2427         err = kstrtoint(buf, 10, &value);
2428         if (err)
2429                 return err;
2430
2431         ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2432         if (ret < 0) {
2433                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2434                 return ret;
2435         }
2436
2437         ret = amdgpu_dpm_set_fan_control_mode(adev, value);
2438
2439         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2440         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2441
2442         if (ret)
2443                 return -EINVAL;
2444
2445         return count;
2446 }
2447
2448 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2449                                          struct device_attribute *attr,
2450                                          char *buf)
2451 {
2452         return sysfs_emit(buf, "%i\n", 0);
2453 }
2454
2455 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2456                                          struct device_attribute *attr,
2457                                          char *buf)
2458 {
2459         return sysfs_emit(buf, "%i\n", 255);
2460 }
2461
2462 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2463                                      struct device_attribute *attr,
2464                                      const char *buf, size_t count)
2465 {
2466         struct amdgpu_device *adev = dev_get_drvdata(dev);
2467         int err;
2468         u32 value;
2469         u32 pwm_mode;
2470
2471         if (amdgpu_in_reset(adev))
2472                 return -EPERM;
2473         if (adev->in_suspend && !adev->in_runpm)
2474                 return -EPERM;
2475
2476         err = kstrtou32(buf, 10, &value);
2477         if (err)
2478                 return err;
2479
2480         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2481         if (err < 0) {
2482                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2483                 return err;
2484         }
2485
2486         err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2487         if (err)
2488                 goto out;
2489
2490         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2491                 pr_info("manual fan speed control should be enabled first\n");
2492                 err = -EINVAL;
2493                 goto out;
2494         }
2495
2496         err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2497
2498 out:
2499         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2500         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2501
2502         if (err)
2503                 return err;
2504
2505         return count;
2506 }
2507
2508 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2509                                      struct device_attribute *attr,
2510                                      char *buf)
2511 {
2512         struct amdgpu_device *adev = dev_get_drvdata(dev);
2513         int err;
2514         u32 speed = 0;
2515
2516         if (amdgpu_in_reset(adev))
2517                 return -EPERM;
2518         if (adev->in_suspend && !adev->in_runpm)
2519                 return -EPERM;
2520
2521         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2522         if (err < 0) {
2523                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2524                 return err;
2525         }
2526
2527         err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2528
2529         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2530         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2531
2532         if (err)
2533                 return err;
2534
2535         return sysfs_emit(buf, "%i\n", speed);
2536 }
2537
2538 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2539                                            struct device_attribute *attr,
2540                                            char *buf)
2541 {
2542         struct amdgpu_device *adev = dev_get_drvdata(dev);
2543         int err;
2544         u32 speed = 0;
2545
2546         if (amdgpu_in_reset(adev))
2547                 return -EPERM;
2548         if (adev->in_suspend && !adev->in_runpm)
2549                 return -EPERM;
2550
2551         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2552         if (err < 0) {
2553                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2554                 return err;
2555         }
2556
2557         err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2558
2559         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2560         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2561
2562         if (err)
2563                 return err;
2564
2565         return sysfs_emit(buf, "%i\n", speed);
2566 }
2567
2568 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2569                                          struct device_attribute *attr,
2570                                          char *buf)
2571 {
2572         struct amdgpu_device *adev = dev_get_drvdata(dev);
2573         u32 min_rpm = 0;
2574         int r;
2575
2576         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2577                                    (void *)&min_rpm);
2578
2579         if (r)
2580                 return r;
2581
2582         return sysfs_emit(buf, "%d\n", min_rpm);
2583 }
2584
2585 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2586                                          struct device_attribute *attr,
2587                                          char *buf)
2588 {
2589         struct amdgpu_device *adev = dev_get_drvdata(dev);
2590         u32 max_rpm = 0;
2591         int r;
2592
2593         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2594                                    (void *)&max_rpm);
2595
2596         if (r)
2597                 return r;
2598
2599         return sysfs_emit(buf, "%d\n", max_rpm);
2600 }
2601
2602 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2603                                            struct device_attribute *attr,
2604                                            char *buf)
2605 {
2606         struct amdgpu_device *adev = dev_get_drvdata(dev);
2607         int err;
2608         u32 rpm = 0;
2609
2610         if (amdgpu_in_reset(adev))
2611                 return -EPERM;
2612         if (adev->in_suspend && !adev->in_runpm)
2613                 return -EPERM;
2614
2615         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2616         if (err < 0) {
2617                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2618                 return err;
2619         }
2620
2621         err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2622
2623         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2624         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2625
2626         if (err)
2627                 return err;
2628
2629         return sysfs_emit(buf, "%i\n", rpm);
2630 }
2631
2632 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2633                                      struct device_attribute *attr,
2634                                      const char *buf, size_t count)
2635 {
2636         struct amdgpu_device *adev = dev_get_drvdata(dev);
2637         int err;
2638         u32 value;
2639         u32 pwm_mode;
2640
2641         if (amdgpu_in_reset(adev))
2642                 return -EPERM;
2643         if (adev->in_suspend && !adev->in_runpm)
2644                 return -EPERM;
2645
2646         err = kstrtou32(buf, 10, &value);
2647         if (err)
2648                 return err;
2649
2650         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2651         if (err < 0) {
2652                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2653                 return err;
2654         }
2655
2656         err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2657         if (err)
2658                 goto out;
2659
2660         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2661                 err = -ENODATA;
2662                 goto out;
2663         }
2664
2665         err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2666
2667 out:
2668         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2669         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2670
2671         if (err)
2672                 return err;
2673
2674         return count;
2675 }
2676
2677 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2678                                             struct device_attribute *attr,
2679                                             char *buf)
2680 {
2681         struct amdgpu_device *adev = dev_get_drvdata(dev);
2682         u32 pwm_mode = 0;
2683         int ret;
2684
2685         if (amdgpu_in_reset(adev))
2686                 return -EPERM;
2687         if (adev->in_suspend && !adev->in_runpm)
2688                 return -EPERM;
2689
2690         ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2691         if (ret < 0) {
2692                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2693                 return ret;
2694         }
2695
2696         ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2697
2698         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2699         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2700
2701         if (ret)
2702                 return -EINVAL;
2703
2704         return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2705 }
2706
2707 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2708                                             struct device_attribute *attr,
2709                                             const char *buf,
2710                                             size_t count)
2711 {
2712         struct amdgpu_device *adev = dev_get_drvdata(dev);
2713         int err;
2714         int value;
2715         u32 pwm_mode;
2716
2717         if (amdgpu_in_reset(adev))
2718                 return -EPERM;
2719         if (adev->in_suspend && !adev->in_runpm)
2720                 return -EPERM;
2721
2722         err = kstrtoint(buf, 10, &value);
2723         if (err)
2724                 return err;
2725
2726         if (value == 0)
2727                 pwm_mode = AMD_FAN_CTRL_AUTO;
2728         else if (value == 1)
2729                 pwm_mode = AMD_FAN_CTRL_MANUAL;
2730         else
2731                 return -EINVAL;
2732
2733         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2734         if (err < 0) {
2735                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2736                 return err;
2737         }
2738
2739         err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2740
2741         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2742         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2743
2744         if (err)
2745                 return -EINVAL;
2746
2747         return count;
2748 }
2749
2750 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2751                                         struct device_attribute *attr,
2752                                         char *buf)
2753 {
2754         struct amdgpu_device *adev = dev_get_drvdata(dev);
2755         u32 vddgfx;
2756         int r;
2757
2758         /* get the voltage */
2759         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
2760                                    (void *)&vddgfx);
2761         if (r)
2762                 return r;
2763
2764         return sysfs_emit(buf, "%d\n", vddgfx);
2765 }
2766
2767 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2768                                               struct device_attribute *attr,
2769                                               char *buf)
2770 {
2771         return sysfs_emit(buf, "vddgfx\n");
2772 }
2773
2774 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2775                                        struct device_attribute *attr,
2776                                        char *buf)
2777 {
2778         struct amdgpu_device *adev = dev_get_drvdata(dev);
2779         u32 vddnb;
2780         int r;
2781
2782         /* only APUs have vddnb */
2783         if  (!(adev->flags & AMD_IS_APU))
2784                 return -EINVAL;
2785
2786         /* get the voltage */
2787         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
2788                                    (void *)&vddnb);
2789         if (r)
2790                 return r;
2791
2792         return sysfs_emit(buf, "%d\n", vddnb);
2793 }
2794
2795 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2796                                               struct device_attribute *attr,
2797                                               char *buf)
2798 {
2799         return sysfs_emit(buf, "vddnb\n");
2800 }
2801
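
/*
 * Helper for the power1_average/power1_input reads below. As the
 * conversion implements, the raw sensor value is packed: the upper
 * 24 bits carry whole watts and the low 8 bits carry milliwatts.
 * For example (illustrative), a raw query of 0x0905 decodes to
 * 9 W + 5 mW, i.e. 9 * 1000000 + 5 * 1000 = 9005000 microwatts.
 */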
2802 static int amdgpu_hwmon_get_power(struct device *dev,
2803                                   enum amd_pp_sensors sensor)
2804 {
2805         struct amdgpu_device *adev = dev_get_drvdata(dev);
2806         unsigned int uw;
2807         u32 query = 0;
2808         int r;
2809
2810         r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
2811         if (r)
2812                 return r;
2813
2814         /* convert to microwatts */
2815         uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2816
2817         return uw;
2818 }
2819
2820 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2821                                            struct device_attribute *attr,
2822                                            char *buf)
2823 {
2824         ssize_t val;
2825
2826         val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
2827         if (val < 0)
2828                 return val;
2829
2830         return sysfs_emit(buf, "%zd\n", val);
2831 }
2832
2833 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
2834                                              struct device_attribute *attr,
2835                                              char *buf)
2836 {
2837         ssize_t val;
2838
2839         val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
2840         if (val < 0)
2841                 return val;
2842
2843         return sysfs_emit(buf, "%zd\n", val);
2844 }
2845
2846 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2847                                          struct device_attribute *attr,
2848                                          char *buf)
2849 {
2850         return sysfs_emit(buf, "%i\n", 0);
2851 }
2852
2853
2854 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2855                                         struct device_attribute *attr,
2856                                         char *buf,
2857                                         enum pp_power_limit_level pp_limit_level)
2858 {
2859         struct amdgpu_device *adev = dev_get_drvdata(dev);
2860         enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2861         uint32_t limit;
2862         ssize_t size;
2863         int r;
2864
2865         if (amdgpu_in_reset(adev))
2866                 return -EPERM;
2867         if (adev->in_suspend && !adev->in_runpm)
2868                 return -EPERM;
2869
2870         r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2871         if (r < 0) {
2872                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2873                 return r;
2874         }
2875
2876         r = amdgpu_dpm_get_power_limit(adev, &limit,
2877                                       pp_limit_level, power_type);
2878
2879         if (!r)
2880                 size = sysfs_emit(buf, "%u\n", limit * 1000000);
2881         else
2882                 size = sysfs_emit(buf, "\n");
2883
2884         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2885         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2886
2887         return size;
2888 }
2889
2890
2891 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2892                                          struct device_attribute *attr,
2893                                          char *buf)
2894 {
2895         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
2897 }
2898
2899 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2900                                          struct device_attribute *attr,
2901                                          char *buf)
2902 {
2903         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
2905 }
2906
2907 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2908                                          struct device_attribute *attr,
2909                                          char *buf)
2910 {
2911         return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
2913 }
2914
2915 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2916                                          struct device_attribute *attr,
2917                                          char *buf)
2918 {
2919         struct amdgpu_device *adev = dev_get_drvdata(dev);
2920         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2921
2922         if (gc_ver == IP_VERSION(10, 3, 1))
2923                 return sysfs_emit(buf, "%s\n",
2924                                   to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
2925                                   "fastPPT" : "slowPPT");
2926         else
2927                 return sysfs_emit(buf, "PPT\n");
2928 }
2929
2930 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2931                 struct device_attribute *attr,
2932                 const char *buf,
2933                 size_t count)
2934 {
2935         struct amdgpu_device *adev = dev_get_drvdata(dev);
2936         int limit_type = to_sensor_dev_attr(attr)->index;
2937         int err;
2938         u32 value;
2939
2940         if (amdgpu_in_reset(adev))
2941                 return -EPERM;
2942         if (adev->in_suspend && !adev->in_runpm)
2943                 return -EPERM;
2944
2945         if (amdgpu_sriov_vf(adev))
2946                 return -EINVAL;
2947
2948         err = kstrtou32(buf, 10, &value);
2949         if (err)
2950                 return err;
2951
2952         value = value / 1000000; /* convert to Watt */
2953         value |= limit_type << 24;
2954
2955         err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2956         if (err < 0) {
2957                 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2958                 return err;
2959         }
2960
2961         err = amdgpu_dpm_set_power_limit(adev, value);
2962
2963         pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2964         pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2965
2966         if (err)
2967                 return err;
2968
2969         return count;
2970 }
2971
2972 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2973                                       struct device_attribute *attr,
2974                                       char *buf)
2975 {
2976         struct amdgpu_device *adev = dev_get_drvdata(dev);
2977         uint32_t sclk;
2978         int r;
2979
2980         /* get the sclk */
2981         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2982                                    (void *)&sclk);
2983         if (r)
2984                 return r;
2985
2986         return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
2987 }
2988
2989 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2990                                             struct device_attribute *attr,
2991                                             char *buf)
2992 {
2993         return sysfs_emit(buf, "sclk\n");
2994 }
2995
2996 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2997                                       struct device_attribute *attr,
2998                                       char *buf)
2999 {
3000         struct amdgpu_device *adev = dev_get_drvdata(dev);
3001         uint32_t mclk;
3002         int r;
3003
3004         /* get the mclk */
3005         r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3006                                    (void *)&mclk);
3007         if (r)
3008                 return r;
3009
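        /* same 10 kHz units as the sclk sensor; convert to Hz for hwmon */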
3010         return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3011 }
3012
3013 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3014                                             struct device_attribute *attr,
3015                                             char *buf)
3016 {
3017         return sysfs_emit(buf, "mclk\n");
3018 }
3019
3020 /**
3021  * DOC: hwmon
3022  *
3023  * The amdgpu driver exposes the following sensor interfaces:
3024  *
3025  * - GPU temperature (via the on-die sensor)
3026  *
3027  * - GPU voltage
3028  *
3029  * - Northbridge voltage (APUs only)
3030  *
3031  * - GPU power
3032  *
3033  * - GPU fan
3034  *
3035  * - GPU gfx/compute engine clock
3036  *
3037  * - GPU memory clock (dGPU only)
3038  *
3039  * hwmon interfaces for GPU temperature:
3040  *
3041  * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3042  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3043  *
3044  * - temp[1-3]_label: temperature channel label
3045  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3046  *
3047  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3048  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3049  *
3050  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3051  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3052  *
3053  * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3054  *   - these are supported on SOC15 dGPUs only
3055  *
3056  * hwmon interfaces for GPU voltage:
3057  *
3058  * - in0_input: the voltage on the GPU in millivolts
3059  *
3060  * - in1_input: the voltage on the Northbridge in millivolts
3061  *
3062  * hwmon interfaces for GPU power:
3063  *
3064  * - power1_average: average power used by the SoC in microWatts.  On APUs this includes the CPU.
3065  *
3066  * - power1_input: instantaneous power used by the SoC in microWatts.  On APUs this includes the CPU.
3067  *
3068  * - power1_cap_min: minimum cap supported in microWatts
3069  *
3070  * - power1_cap_max: maximum cap supported in microWatts
3071  *
3072  * - power1_cap: selected power cap in microWatts
3073  *
3074  * hwmon interfaces for GPU fan:
3075  *
3076  * - pwm1: pulse width modulation fan level (0-255)
3077  *
3078  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3079  *
3080  * - pwm1_min: pulse width modulation fan control minimum level (0)
3081  *
3082  * - pwm1_max: pulse width modulation fan control maximum level (255)
3083  *
3084  * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM)
3085  *
3086  * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM)
3087  *
3088  * - fan1_input: fan speed in RPM
3089  *
3090  * - fan[1-\*]_target: Desired fan speed. Unit: revolutions/min (RPM)
3091  *
3092  * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
3093  *
3094  * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3095  *       Doing so will cause the earlier setting to be overridden by the later one.
3096  *
3097  * hwmon interfaces for GPU clocks:
3098  *
3099  * - freq1_input: the gfx/compute clock in hertz
3100  *
3101  * - freq2_input: the memory clock in hertz
3102  *
3103  * You can use hwmon tools like sensors to view this information on your system.
3104  *
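 * A hypothetical example (the hwmon instance number varies from system to
 * system): "cat /sys/class/hwmon/hwmon0/temp1_input" reads the edge
 * temperature in millidegrees Celsius, and "echo 200000000 > power1_cap"
 * in the same directory would request a 200 W power cap.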
3105  */
3106
3107 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3108 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3109 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3110 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3111 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3112 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3113 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3114 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3115 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3116 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3117 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3118 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3119 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3120 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3121 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3122 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3123 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3124 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3125 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3126 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3127 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3128 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3129 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3130 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3131 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3132 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3133 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3134 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3135 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3136 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3137 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3138 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3139 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3140 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3141 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3142 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3143 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3144 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3145 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3146 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3147 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3148 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3149 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3150 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3151 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3152
3153 static struct attribute *hwmon_attributes[] = {
3154         &sensor_dev_attr_temp1_input.dev_attr.attr,
3155         &sensor_dev_attr_temp1_crit.dev_attr.attr,
3156         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3157         &sensor_dev_attr_temp2_input.dev_attr.attr,
3158         &sensor_dev_attr_temp2_crit.dev_attr.attr,
3159         &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3160         &sensor_dev_attr_temp3_input.dev_attr.attr,
3161         &sensor_dev_attr_temp3_crit.dev_attr.attr,
3162         &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3163         &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3164         &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3165         &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3166         &sensor_dev_attr_temp1_label.dev_attr.attr,
3167         &sensor_dev_attr_temp2_label.dev_attr.attr,
3168         &sensor_dev_attr_temp3_label.dev_attr.attr,
3169         &sensor_dev_attr_pwm1.dev_attr.attr,
3170         &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3171         &sensor_dev_attr_pwm1_min.dev_attr.attr,
3172         &sensor_dev_attr_pwm1_max.dev_attr.attr,
3173         &sensor_dev_attr_fan1_input.dev_attr.attr,
3174         &sensor_dev_attr_fan1_min.dev_attr.attr,
3175         &sensor_dev_attr_fan1_max.dev_attr.attr,
3176         &sensor_dev_attr_fan1_target.dev_attr.attr,
3177         &sensor_dev_attr_fan1_enable.dev_attr.attr,
3178         &sensor_dev_attr_in0_input.dev_attr.attr,
3179         &sensor_dev_attr_in0_label.dev_attr.attr,
3180         &sensor_dev_attr_in1_input.dev_attr.attr,
3181         &sensor_dev_attr_in1_label.dev_attr.attr,
3182         &sensor_dev_attr_power1_average.dev_attr.attr,
3183         &sensor_dev_attr_power1_input.dev_attr.attr,
3184         &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3185         &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3186         &sensor_dev_attr_power1_cap.dev_attr.attr,
3187         &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3188         &sensor_dev_attr_power1_label.dev_attr.attr,
3189         &sensor_dev_attr_power2_average.dev_attr.attr,
3190         &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3191         &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3192         &sensor_dev_attr_power2_cap.dev_attr.attr,
3193         &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3194         &sensor_dev_attr_power2_label.dev_attr.attr,
3195         &sensor_dev_attr_freq1_input.dev_attr.attr,
3196         &sensor_dev_attr_freq1_label.dev_attr.attr,
3197         &sensor_dev_attr_freq2_input.dev_attr.attr,
3198         &sensor_dev_attr_freq2_label.dev_attr.attr,
3199         NULL
3200 };
3201
3202 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3203                                         struct attribute *attr, int index)
3204 {
3205         struct device *dev = kobj_to_dev(kobj);
3206         struct amdgpu_device *adev = dev_get_drvdata(dev);
3207         umode_t effective_mode = attr->mode;
3208         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3209         uint32_t tmp;
3210
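        /* returning 0 from this callback hides the attribute entirely */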
3211         /* under multi-VF mode, none of the hwmon attributes are supported */
3212         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3213                 return 0;
3214
3215         /* under pp one VF mode, managing (writing) the hwmon attributes is not supported */
3216         if (amdgpu_sriov_is_pp_one_vf(adev))
3217                 effective_mode &= ~S_IWUSR;
3218
3219         /* Skip fan attributes if fan is not present */
3220         if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3221             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3222             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3223             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3224             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3225             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3226             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3227             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3228             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3229                 return 0;
3230
3231         /* Skip fan attributes on APU */
3232         if ((adev->flags & AMD_IS_APU) &&
3233             (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3234              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3235              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3236              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3237              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3238              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3239              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3240              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3241              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3242                 return 0;
3243
3244         /* Skip crit temp on APU */
3245         if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3246             (gc_ver == IP_VERSION(9, 4, 3))) &&
3247             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3248              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3249                 return 0;
3250
3251         /* Skip limit attributes if DPM is not enabled */
3252         if (!adev->pm.dpm_enabled &&
3253             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3254              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3255              attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3256              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3257              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3258              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3259              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3260              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3261              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3262              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3263              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3264                 return 0;
3265
3266         /* mask fan attributes if we have no bindings for this asic to expose */
3267         if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3268               attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3269             ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3270              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3271                 effective_mode &= ~S_IRUGO;
3272
3273         if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3274               attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3275               ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3276               attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3277                 effective_mode &= ~S_IWUSR;
3278
3279         /* not implemented yet for APUs other than GC 10.3.1 (Vangogh) and 9.4.3 */
3280         if (((adev->family == AMDGPU_FAMILY_SI) ||
3281              ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3282               (gc_ver != IP_VERSION(9, 4, 3)))) &&
3283             (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3284              attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3285              attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3286              attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3287                 return 0;
3288
3289         /* not implemented yet for APUs with GC versions below 9.3.0 (Renoir) */
3290         if (((adev->family == AMDGPU_FAMILY_SI) ||
3291              ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3292             (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3293                 return 0;
3294
3295         /* not all products support both average and instantaneous */
3296         if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3297             amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
3298                 return 0;
3299         if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3300             amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
3301                 return 0;
3302
3303         /* hide max/min values if we can't both query and manage the fan */
3304         if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3305               (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3306               (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3307               (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3308             (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3309              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3310                 return 0;
3311
3312         if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3313              (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3314              (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3315              attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3316                 return 0;
3317
3318         if ((adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
3319              adev->family == AMDGPU_FAMILY_KV ||        /* not implemented yet */
3320              (gc_ver == IP_VERSION(9, 4, 3))) &&
3321             (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3322              attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3323                 return 0;
3324
3325         /* only APUs other than GC 9.4.3 have vddnb */
3326         if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
3327             (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3328              attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3329                 return 0;
3330
3331         /* no mclk on APUs other than GC 9.4.3 */
3332         if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3333             (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3334              attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3335                 return 0;
3336
3337         if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3338             (gc_ver != IP_VERSION(9, 4, 3)) &&
3339             (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3340              attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3341              attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3342              attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3343              attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3344              attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3345                 return 0;
3346
3347         /* hotspot temperature for GC 9.4.3 */
3348         if ((gc_ver == IP_VERSION(9, 4, 3)) &&
3349             (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3350              attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
3351                 return 0;
3352
3353         /* only SOC15 dGPUs support hotspot and mem temperatures */
3354         if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
3355             (gc_ver == IP_VERSION(9, 4, 3))) &&
3356              (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3357              attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3358              attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3359              attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3360              attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3361                 return 0;
3362
3363         /* only Vangogh has fast PPT limit and power labels */
3364         if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3365             (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3366              attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3367              attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3368              attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3369              attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3370              attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3371                 return 0;
3372
3373         return effective_mode;
3374 }
3375
3376 static const struct attribute_group hwmon_attrgroup = {
3377         .attrs = hwmon_attributes,
3378         .is_visible = hwmon_attributes_visible,
3379 };
3380
3381 static const struct attribute_group *hwmon_groups[] = {
3382         &hwmon_attrgroup,
3383         NULL
3384 };
3385
3386 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3387                                        enum pp_clock_type od_type,
3388                                        char *buf)
3389 {
3390         int size = 0;
3391         int ret;
3392
3393         if (amdgpu_in_reset(adev))
3394                 return -EPERM;
3395         if (adev->in_suspend && !adev->in_runpm)
3396                 return -EPERM;
3397
3398         ret = pm_runtime_get_sync(adev->dev);
3399         if (ret < 0) {
3400                 pm_runtime_put_autosuspend(adev->dev);
3401                 return ret;
3402         }
3403
3404         size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3405         if (size == 0)
3406                 size = sysfs_emit(buf, "\n");
3407
3408         pm_runtime_mark_last_busy(adev->dev);
3409         pm_runtime_put_autosuspend(adev->dev);
3410
3411         return size;
3412 }
3413
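/*
 * A command line is either a single "c" (commit the staged settings) or a
 * whitespace-separated list of integer parameters for the edit command.
 */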
3414 static int parse_input_od_command_lines(const char *buf,
3415                                         size_t count,
3416                                         u32 *type,
3417                                         long *params,
3418                                         uint32_t *num_of_params)
3419 {
3420         const char delimiter[3] = {' ', '\n', '\0'};
3421         uint32_t parameter_size = 0;
3422         char buf_cpy[128] = {0};
3423         char *tmp_str, *sub_str;
3424         int ret;
3425
3426         if (count > sizeof(buf_cpy) - 1)
3427                 return -EINVAL;
3428
3429         memcpy(buf_cpy, buf, count);
3430         tmp_str = buf_cpy;
3431
3432         /* skip leading spaces */
3433         while (isspace(*tmp_str))
3434                 tmp_str++;
3435
3436         switch (*tmp_str) {
3437         case 'c':
3438                 *type = PP_OD_COMMIT_DPM_TABLE;
3439                 return 0;
3440         default:
3441                 break;
3442         }
3443
3444         while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3445                 if (strlen(sub_str) == 0)
3446                         continue;
3447
3448                 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3449                 if (ret)
3450                         return -EINVAL;
3451                 parameter_size++;
3452
3453                 while (isspace(*tmp_str))
3454                         tmp_str++;
3455         }
3456
3457         *num_of_params = parameter_size;
3458
3459         return 0;
3460 }
3461
3462 static int
3463 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3464                                      enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3465                                      const char *in_buf,
3466                                      size_t count)
3467 {
3468         uint32_t parameter_size = 0;
3469         long parameter[64];
3470         int ret;
3471
3472         if (amdgpu_in_reset(adev))
3473                 return -EPERM;
3474         if (adev->in_suspend && !adev->in_runpm)
3475                 return -EPERM;
3476
3477         ret = parse_input_od_command_lines(in_buf,
3478                                            count,
3479                                            &cmd_type,
3480                                            parameter,
3481                                            &parameter_size);
3482         if (ret)
3483                 return ret;
3484
3485         ret = pm_runtime_get_sync(adev->dev);
3486         if (ret < 0)
3487                 goto err_out0;
3488
3489         ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3490                                             cmd_type,
3491                                             parameter,
3492                                             parameter_size);
3493         if (ret)
3494                 goto err_out1;
3495
3496         if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3497                 ret = amdgpu_dpm_dispatch_task(adev,
3498                                                AMD_PP_TASK_READJUST_POWER_STATE,
3499                                                NULL);
3500                 if (ret)
3501                         goto err_out1;
3502         }
3503
3504         pm_runtime_mark_last_busy(adev->dev);
3505         pm_runtime_put_autosuspend(adev->dev);
3506
3507         return count;
3508
3509 err_out1:
3510         pm_runtime_mark_last_busy(adev->dev);
3511 err_out0:
3512         pm_runtime_put_autosuspend(adev->dev);
3513
3514         return ret;
3515 }
3516
3517 /**
3518  * DOC: fan_curve
3519  *
3520  * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3521  * control curve.
3522  *
3523  * Reading back the file shows you the current settings (temperature in degrees
3524  * Celsius and fan speed in PWM) applied to every anchor point of the curve,
3525  * plus their permitted ranges if changeable.
3526  *
3527  * Writing a string of the form "anchor_point_index temperature fan_speed_in_pwm"
3528  * to the file changes the settings for the specified anchor point
3529  * accordingly.
3530  *
3531  * When you have finished editing, write "c" (commit) to the file to commit
3532  * your changes.
3533  *
3534  * Two fan control modes are supported: auto and manual. In auto mode, the
3535  * PMFW handles the fan speed control (how the fan speed reacts to the ASIC
3536  * temperature), while in manual mode users can set their own fan curve as
3537  * described here. Normally the ASIC boots up in auto mode. Any setting
3538  * made via this interface will implicitly switch the fan control to
3539  * manual mode.
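 *
 * A hypothetical example, assuming the file lives under gpu_od/fan_ctrl/:
 * "echo 2 65 180 > fan_curve" would stage anchor point 2 at 65 degrees
 * Celsius and a fan speed of 180 PWM, and "echo c > fan_curve" would then
 * commit the change.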
3540  */
3541 static ssize_t fan_curve_show(struct kobject *kobj,
3542                               struct kobj_attribute *attr,
3543                               char *buf)
3544 {
3545         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3546         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3547
3548         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
3549 }
3550
3551 static ssize_t fan_curve_store(struct kobject *kobj,
3552                                struct kobj_attribute *attr,
3553                                const char *buf,
3554                                size_t count)
3555 {
3556         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3557         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3558
3559         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3560                                                              PP_OD_EDIT_FAN_CURVE,
3561                                                              buf,
3562                                                              count);
3563 }
3564
3565 static umode_t fan_curve_visible(struct amdgpu_device *adev)
3566 {
3567         umode_t umode = 0000;
3568
3569         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
3570                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3571
3572         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
3573                 umode |= S_IWUSR;
3574
3575         return umode;
3576 }
3577
3578 /**
3579  * DOC: acoustic_limit_rpm_threshold
3580  *
3581  * The amdgpu driver provides a sysfs API for checking and adjusting the
3582  * acoustic limit in RPM for fan control.
3583  *
3584  * Reading back the file shows you the current setting and the permitted
3585  * ranges if changeable.
3586  *
3587  * Writing an integer to the file changes the setting accordingly.
3588  *
3589  * When you have finished editing, write "c" (commit) to the file to commit
3590  * your changes.
3591  *
3592  * This setting works under auto fan control mode only. It adjusts the
3593  * PMFW's cap on the maximum speed in RPM the fan can spin. Setting via this
3594  * interface will implicitly switch the fan control to auto mode.
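 *
 * A hypothetical example, assuming the file lives under gpu_od/fan_ctrl/:
 * "echo 3000 > acoustic_limit_rpm_threshold" followed by
 * "echo c > acoustic_limit_rpm_threshold" would cap the fan at 3000 RPM.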
3595  */
3596 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
3597                                              struct kobj_attribute *attr,
3598                                              char *buf)
3599 {
3600         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3601         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3602
3603         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
3604 }
3605
3606 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
3607                                               struct kobj_attribute *attr,
3608                                               const char *buf,
3609                                               size_t count)
3610 {
3611         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3612         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3613
3614         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3615                                                              PP_OD_EDIT_ACOUSTIC_LIMIT,
3616                                                              buf,
3617                                                              count);
3618 }
3619
3620 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
3621 {
3622         umode_t umode = 0000;
3623
3624         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
3625                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3626
3627         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
3628                 umode |= S_IWUSR;
3629
3630         return umode;
3631 }
3632
3633 /**
3634  * DOC: acoustic_target_rpm_threshold
3635  *
3636  * The amdgpu driver provides a sysfs API for checking and adjusting the
3637  * acoustic target in RPM for fan control.
3638  *
3639  * Reading back the file shows you the current setting and the permitted
3640  * ranges if changeable.
3641  *
3642  * Writing an integer to the file changes the setting accordingly.
3643  *
3644  * When you have finished editing, write "c" (commit) to the file to commit
3645  * your changes.
3646  *
3647  * This setting works under auto fan control mode only and can coexist with
3648  * other settings that also work under auto mode. It adjusts the PMFW's cap
3649  * on the maximum speed in RPM the fan can spin while the ASIC temperature
3650  * is not greater than the target temperature. Setting via this interface
3651  * will implicitly switch the fan control to auto mode.
3652  */
3653 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
3654                                               struct kobj_attribute *attr,
3655                                               char *buf)
3656 {
3657         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3658         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3659
3660         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
3661 }
3662
3663 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
3664                                                struct kobj_attribute *attr,
3665                                                const char *buf,
3666                                                size_t count)
3667 {
3668         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3669         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3670
3671         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3672                                                              PP_OD_EDIT_ACOUSTIC_TARGET,
3673                                                              buf,
3674                                                              count);
3675 }
3676
3677 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
3678 {
3679         umode_t umode = 0000;
3680
3681         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
3682                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3683
3684         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
3685                 umode |= S_IWUSR;
3686
3687         return umode;
3688 }
3689
3690 /**
3691  * DOC: fan_target_temperature
3692  *
3693  * The amdgpu driver provides a sysfs API for checking and adjusting the
3694  * target temperature in degrees Celsius for fan control.
3695  *
3696  * Reading back the file shows you the current setting and the permitted
3697  * ranges if changeable.
3698  *
3699  * Writing an integer to the file changes the setting accordingly.
3700  *
3701  * When you have finished editing, write "c" (commit) to the file to commit
3702  * your changes.
3703  *
3704  * This setting works under auto fan control mode only and can coexist with
3705  * other settings that also work under auto mode. Paired with the
3706  * acoustic_target_rpm_threshold setting, it defines the maximum speed in
3707  * RPM the fan can spin while the ASIC temperature is not greater than the
3708  * target temperature. Setting via this interface will implicitly switch
3709  * the fan control to auto mode.
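 *
 * A hypothetical pairing sketch: with fan_target_temperature set to 70 and
 * acoustic_target_rpm_threshold set to 1500, the PMFW keeps the fan at or
 * below 1500 RPM as long as the ASIC stays at or below 70 degrees Celsius.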
3710  */
3711 static ssize_t fan_target_temperature_show(struct kobject *kobj,
3712                                            struct kobj_attribute *attr,
3713                                            char *buf)
3714 {
3715         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3716         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3717
3718         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
3719 }
3720
3721 static ssize_t fan_target_temperature_store(struct kobject *kobj,
3722                                             struct kobj_attribute *attr,
3723                                             const char *buf,
3724                                             size_t count)
3725 {
3726         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3727         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3728
3729         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3730                                                              PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
3731                                                              buf,
3732                                                              count);
3733 }
3734
3735 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
3736 {
3737         umode_t umode = 0000;
3738
3739         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
3740                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3741
3742         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
3743                 umode |= S_IWUSR;
3744
3745         return umode;
3746 }
3747
3748 /**
3749  * DOC: fan_minimum_pwm
3750  *
3751  * The amdgpu driver provides a sysfs API for checking and adjusting the
3752  * minimum fan speed in PWM.
3753  *
3754  * Reading back the file shows you the current setting and the permitted
3755  * ranges if changeable.
3756  *
3757  * Writing an integer to the file changes the setting accordingly.
3758  *
3759  * When you have finished editing, write "c" (commit) to the file to commit
3760  * your changes.
3761  *
3762  * This setting works under auto fan control mode only and can coexist with
3763  * other settings that also work under auto mode. It adjusts the minimum
3764  * fan speed in PWM the PMFW will let the fan spin at. Setting via this
3765  * interface will implicitly switch the fan control to auto mode.
3766  */
3767 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
3768                                     struct kobj_attribute *attr,
3769                                     char *buf)
3770 {
3771         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3772         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3773
3774         return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
3775 }
3776
3777 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
3778                                      struct kobj_attribute *attr,
3779                                      const char *buf,
3780                                      size_t count)
3781 {
3782         struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3783         struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3784
3785         return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3786                                                              PP_OD_EDIT_FAN_MINIMUM_PWM,
3787                                                              buf,
3788                                                              count);
3789 }
3790
3791 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
3792 {
3793         umode_t umode = 0000;
3794
3795         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
3796                 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3797
3798         if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
3799                 umode |= S_IWUSR;
3800
3801         return umode;
3802 }
3803
3804 static struct od_feature_set amdgpu_od_set = {
3805         .containers = {
3806                 [0] = {
3807                         .name = "fan_ctrl",
3808                         .sub_feature = {
3809                                 [0] = {
3810                                         .name = "fan_curve",
3811                                         .ops = {
3812                                                 .is_visible = fan_curve_visible,
3813                                                 .show = fan_curve_show,
3814                                                 .store = fan_curve_store,
3815                                         },
3816                                 },
3817                                 [1] = {
3818                                         .name = "acoustic_limit_rpm_threshold",
3819                                         .ops = {
3820                                                 .is_visible = acoustic_limit_threshold_visible,
3821                                                 .show = acoustic_limit_threshold_show,
3822                                                 .store = acoustic_limit_threshold_store,
3823                                         },
3824                                 },
3825                                 [2] = {
3826                                         .name = "acoustic_target_rpm_threshold",
3827                                         .ops = {
3828                                                 .is_visible = acoustic_target_threshold_visible,
3829                                                 .show = acoustic_target_threshold_show,
3830                                                 .store = acoustic_target_threshold_store,
3831                                         },
3832                                 },
3833                                 [3] = {
3834                                         .name = "fan_target_temperature",
3835                                         .ops = {
3836                                                 .is_visible = fan_target_temperature_visible,
3837                                                 .show = fan_target_temperature_show,
3838                                                 .store = fan_target_temperature_store,
3839                                         },
3840                                 },
3841                                 [4] = {
3842                                         .name = "fan_minimum_pwm",
3843                                         .ops = {
3844                                                 .is_visible = fan_minimum_pwm_visible,
3845                                                 .show = fan_minimum_pwm_show,
3846                                                 .store = fan_minimum_pwm_store,
3847                                         },
3848                                 },
3849                         },
3850                 },
3851         },
3852 };
3853
3854 static void od_kobj_release(struct kobject *kobj)
3855 {
3856         struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
3857
3858         kfree(od_kobj);
3859 }
3860
3861 static const struct kobj_type od_ktype = {
3862         .release        = od_kobj_release,
3863         .sysfs_ops      = &kobj_sysfs_ops,
3864 };
3865
3866 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
3867 {
3868         struct od_kobj *container, *container_next;
3869         struct od_attribute *attribute, *attribute_next;
3870
3871         if (list_empty(&adev->pm.od_kobj_list))
3872                 return;
3873
3874         list_for_each_entry_safe(container, container_next,
3875                                  &adev->pm.od_kobj_list, entry) {
3876                 list_del(&container->entry);
3877
3878                 list_for_each_entry_safe(attribute, attribute_next,
3879                                          &container->attribute, entry) {
3880                         list_del(&attribute->entry);
3881                         sysfs_remove_file(&container->kobj,
3882                                           &attribute->attribute.attr);
3883                         kfree(attribute);
3884                 }
3885
3886                 kobject_put(&container->kobj);
3887         }
3888 }
3889
3890 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
3891                                            struct od_feature_ops *feature_ops)
3892 {
3893         umode_t mode;
3894
3895         if (!feature_ops->is_visible)
3896                 return false;
3897
3898         /*
3899           * If the feature has neither the user read nor the user write
3900           * mode set, the feature is considered unsupported and the
3901           * relevant sysfs interface should not be exposed.
3902          */
3903         mode = feature_ops->is_visible(adev);
3904         if (mode & (S_IRUSR | S_IWUSR))
3905                 return true;
3906
3907         return false;
3908 }
3909
3910 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
3911                                         struct od_feature_container *container)
3912 {
3913         int i;
3914
3915         /*
3916          * If there is no valid entry within the container, the container
3917          * is recognized as self-contained. A valid entry here means one
3918          * that has a valid name and is visible/supported by
3919          * the ASIC.
3920          */
3921         for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
3922                 if (container->sub_feature[i].name &&
3923                     amdgpu_is_od_feature_supported(adev,
3924                         &container->sub_feature[i].ops))
3925                         return false;
3926         }
3927
3928         return true;
3929 }
3930
3931 static int amdgpu_od_set_init(struct amdgpu_device *adev)
3932 {
3933         struct od_kobj *top_set, *sub_set;
3934         struct od_attribute *attribute;
3935         struct od_feature_container *container;
3936         struct od_feature_item *feature;
3937         int i, j;
3938         int ret;
3939
3940         /* Set up the top `gpu_od` directory which holds all the other OD interfaces */
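        /*
         * With the current amdgpu_od_set this yields a hierarchy like:
         *   gpu_od/fan_ctrl/fan_curve
         *   gpu_od/fan_ctrl/acoustic_limit_rpm_threshold
         *   ...
         */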
3941         top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
3942         if (!top_set)
3943                 return -ENOMEM;
3944         list_add(&top_set->entry, &adev->pm.od_kobj_list);
3945
3946         ret = kobject_init_and_add(&top_set->kobj,
3947                                    &od_ktype,
3948                                    &adev->dev->kobj,
3949                                    "%s",
3950                                    "gpu_od");
3951         if (ret)
3952                 goto err_out;
3953         INIT_LIST_HEAD(&top_set->attribute);
3954         top_set->priv = adev;
3955
3956         for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
3957                 container = &amdgpu_od_set.containers[i];
3958
3959                 if (!container->name)
3960                         continue;
3961
3962                 /*
3963                  * If there are valid entries within the container, the container
3964                  * is presented as a sub directory and all the entries it holds
3965                  * are presented as plain files under it.
3966                  * If there is no valid entry within the container, the container
3967                  * itself is presented as a plain file under the top `gpu_od` directory.
3968                  */
3969                 if (amdgpu_od_is_self_contained(adev, container)) {
3970                         if (!amdgpu_is_od_feature_supported(adev,
3971                              &container->ops))
3972                                 continue;
3973
3974                         /*
3975                          * The container is presented as a plain file under top `gpu_od`
3976                          * directory.
3977                          */
3978                         attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
3979                         if (!attribute) {
3980                                 ret = -ENOMEM;
3981                                 goto err_out;
3982                         }
3983                         list_add(&attribute->entry, &top_set->attribute);
3984
3985                         attribute->attribute.attr.mode =
3986                                         container->ops.is_visible(adev);
3987                         attribute->attribute.attr.name = container->name;
3988                         attribute->attribute.show =
3989                                         container->ops.show;
3990                         attribute->attribute.store =
3991                                         container->ops.store;
3992                         ret = sysfs_create_file(&top_set->kobj,
3993                                                 &attribute->attribute.attr);
3994                         if (ret)
3995                                 goto err_out;
3996                 } else {
3997                         /* The container is presented as a sub directory. */
3998                         sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
3999                         if (!sub_set) {
4000                                 ret = -ENOMEM;
4001                                 goto err_out;
4002                         }
4003                         list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4004
4005                         ret = kobject_init_and_add(&sub_set->kobj,
4006                                                    &od_ktype,
4007                                                    &top_set->kobj,
4008                                                    "%s",
4009                                                    container->name);
4010                         if (ret)
4011                                 goto err_out;
4012                         INIT_LIST_HEAD(&sub_set->attribute);
4013                         sub_set->priv = adev;
4014
4015                         for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4016                                 feature = &container->sub_feature[j];
4017                                 if (!feature->name)
4018                                         continue;
4019
4020                                 if (!amdgpu_is_od_feature_supported(adev,
4021                                      &feature->ops))
4022                                         continue;
4023
4024                                 /*
4025                                  * With the container presented as a sub directory, the entry within
4026                                  * it is presented as a plain file under the sub directory.
4027                                  */
4028                                 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4029                                 if (!attribute) {
4030                                         ret = -ENOMEM;
4031                                         goto err_out;
4032                                 }
4033                                 list_add(&attribute->entry, &sub_set->attribute);
4034
4035                                 attribute->attribute.attr.mode =
4036                                                 feature->ops.is_visible(adev);
4037                                 attribute->attribute.attr.name = feature->name;
4038                                 attribute->attribute.show =
4039                                                 feature->ops.show;
4040                                 attribute->attribute.store =
4041                                                 feature->ops.store;
4042                                 ret = sysfs_create_file(&sub_set->kobj,
4043                                                         &attribute->attribute.attr);
4044                                 if (ret)
4045                                         goto err_out;
4046                         }
4047                 }
4048         }
4049
4050         return 0;
4051
4052 err_out:
4053         amdgpu_od_set_fini(adev);
4054
4055         return ret;
4056 }
4057
4058 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4059 {
4060         uint32_t mask = 0;
4061         int ret;
4062
4063         if (adev->pm.sysfs_initialized)
4064                 return 0;
4065
4066         INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4067
4068         if (adev->pm.dpm_enabled == 0)
4069                 return 0;
4070
4071         adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4072                                                                    DRIVER_NAME, adev,
4073                                                                    hwmon_groups);
4074         if (IS_ERR(adev->pm.int_hwmon_dev)) {
4075                 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4076                 dev_err(adev->dev,
4077                         "Unable to register hwmon device: %d\n", ret);
4078                 return ret;
4079         }
4080
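        /* pick which flagged attribute sets to expose based on the SRIOV VF mode */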
4081         switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
4082         case SRIOV_VF_MODE_ONE_VF:
4083                 mask = ATTR_FLAG_ONEVF;
4084                 break;
4085         case SRIOV_VF_MODE_MULTI_VF:
4086                 mask = 0;
4087                 break;
4088         case SRIOV_VF_MODE_BARE_METAL:
4089         default:
4090                 mask = ATTR_FLAG_MASK_ALL;
4091                 break;
4092         }
4093
4094         ret = amdgpu_device_attr_create_groups(adev,
4095                                                amdgpu_device_attrs,
4096                                                ARRAY_SIZE(amdgpu_device_attrs),
4097                                                mask,
4098                                                &adev->pm.pm_attr_list);
4099         if (ret)
4100                 goto err_out0;
4101
4102         if (amdgpu_dpm_is_overdrive_supported(adev)) {
4103                 ret = amdgpu_od_set_init(adev);
4104                 if (ret)
4105                         goto err_out1;
4106         }
4107
4108         adev->pm.sysfs_initialized = true;
4109
4110         return 0;
4111
4112 err_out1:
4113         amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4114 err_out0:
4115         if (adev->pm.int_hwmon_dev)
4116                 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4117
4118         return ret;
4119 }
4120
4121 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4122 {
4123         amdgpu_od_set_fini(adev);
4124
4125         if (adev->pm.int_hwmon_dev)
4126                 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4127
4128         amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4129 }
4130
4131 /*
4132  * Debugfs info
4133  */
4134 #if defined(CONFIG_DEBUG_FS)
4135
4136 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4137                                            struct amdgpu_device *adev)
4138 {
4139         uint16_t *p_val;
4140         uint32_t size;
4141         int i;
4142         uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4143
4144         if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4145                 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), GFP_KERNEL);
4146                 if (!p_val)
4147                         return;
4148                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4149                                             (void *)p_val, &size)) {
4150                         for (i = 0; i < num_cpu_cores; i++)
4151                                 seq_printf(m, "\t%u MHz (CPU%d)\n",
4152                                            *(p_val + i), i);
4153                 }
4154
4155                 kfree(p_val);
4156         }
4157 }
4158
4159 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4160 {
4161         uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4162         uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4163         uint32_t value;
4164         uint64_t value64 = 0;
4165         uint32_t query = 0;
4166         int size;
4167
4168         /* GPU Clocks */
4169         size = sizeof(value);
4170         seq_printf(m, "GFX Clocks and Power:\n");
4171
4172         amdgpu_debugfs_prints_cpu_info(m, adev);
4173
4174         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4175                 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4176         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4177                 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4178         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4179                 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4180         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4181                 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4182         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4183                 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4184         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4185                 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4186         size = sizeof(uint32_t);
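        /* the power sensors report an 8.8 fixed-point value in watts */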
4187         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
4188                 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
4189         size = sizeof(uint32_t);
4190         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
4191                 seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
4192         size = sizeof(value);
4193         seq_printf(m, "\n");
4194
4195         /* GPU Temp */
4196         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4197                 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4198
4199         /* GPU Load */
4200         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4201                 seq_printf(m, "GPU Load: %u %%\n", value);
4202         /* MEM Load */
4203         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4204                 seq_printf(m, "MEM Load: %u %%\n", value);
4205
4206         seq_printf(m, "\n");
4207
4208         /* SMC feature mask */
4209         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4210                 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4211
        /* ASICs newer than CHIP_VEGA20 support these sensors */
        if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
                /* VCN clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "VCN: Disabled\n");
                        } else {
                                seq_printf(m, "VCN: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
                        }
                }
                seq_printf(m, "\n");
        } else {
                /* UVD clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "UVD: Disabled\n");
                        } else {
                                seq_printf(m, "UVD: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
                        }
                }
                seq_printf(m, "\n");

                /* VCE clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
                        if (!value) {
                                seq_printf(m, "VCE: Disabled\n");
                        } else {
                                seq_printf(m, "VCE: Enabled\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
                        }
                }
        }

        return 0;
}
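
/*
 * Taken together, the text this function emits into amdgpu_pm_info looks
 * roughly like the following (all values are illustrative):
 *
 *	GFX Clocks and Power:
 *		800 MHz (MCLK)
 *		1800 MHz (SCLK)
 *		1062 mV (VDDGFX)
 *		12.64 W (average GPU)
 *
 *	GPU Temperature: 45 C
 *	GPU Load: 3 %
 *	MEM Load: 1 %
 *
 *	SMC Feature Mask: 0x00000000f77fe0e7
 *	VCN: Disabled
 */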

static const struct cg_flag_name clocks[] = {
        {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain Memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain Memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
        {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
        {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain Memory Light Sleep"},
        {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
        {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
        {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
        {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
        {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
        {AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
        {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
        {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
        {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
        {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
        {0, NULL},
};

static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
        int i;

        for (i = 0; clocks[i].flag; i++)
                seq_printf(m, "\t%s: %s\n", clocks[i].name,
                           (flags & clocks[i].flag) ? "On" : "Off");
}
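
/*
 * Walks the clocks[] table above until the {0, NULL} sentinel and prints
 * one line per flag, e.g. (the On/Off states are illustrative):
 *
 *	Graphics Medium Grain Clock Gating: On
 *	Memory Controller Light Sleep: Off
 */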

static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        struct drm_device *dev = adev_to_drm(adev);
        u64 flags = 0;
        int r;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        /*
         * pm_runtime_get_sync() can return 1 when the device is already
         * active, and a positive value must not leak out of a seq_file
         * show callback; pm_runtime_resume_and_get() returns 0 or a
         * negative errno and drops the reference on failure.
         */
        r = pm_runtime_resume_and_get(dev->dev);
        if (r < 0)
                return r;

        if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
                r = amdgpu_debugfs_pm_info_pp(m, adev);
                if (r)
                        goto out;
        }

        amdgpu_device_ip_get_clockgating_state(adev, &flags);

        seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
        amdgpu_parse_cg_state(m, flags);
        seq_printf(m, "\n");

out:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
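
/*
 * DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates
 * amdgpu_debugfs_pm_info_fops: an open handler that routes
 * amdgpu_debugfs_pm_info_show() through single_open(), plus the standard
 * seq_read/seq_lseek/single_release hooks.
 */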

/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
                                         size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        size_t smu_prv_buf_size;
        void *smu_prv_buf;
        int ret = 0;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;

        ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
        if (ret)
                return ret;

        if (!smu_prv_buf || !smu_prv_buf_size)
                return -EINVAL;

        return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
                                       smu_prv_buf_size);
}

static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = amdgpu_pm_prv_buffer_read,
        .llseek = default_llseek,
};
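
/*
 * The PMFW region is exposed read-only through debugfs; assuming the
 * device is DRM minor 0 (the path below is illustrative), it can be
 * dumped from userspace with e.g.:
 *
 *	xxd /sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer
 */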

#endif

/*
 * amdgpu_debugfs_pm_init - create the power management debugfs files
 *
 * Registers amdgpu_pm_info and, when the SMU reserved a private debug
 * buffer, amdgpu_pm_prv_buffer under the device's debugfs root, then lets
 * the DPM code add its smart trace buffer (STB) entries.
 */
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        if (!adev->pm.dpm_enabled)
                return;

        debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
                            &amdgpu_debugfs_pm_info_fops);

        if (adev->pm.smu_prv_buffer_size > 0)
                debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
                                         adev,
                                         &amdgpu_debugfs_pm_prv_buffer_fops,
                                         adev->pm.smu_prv_buffer_size);

        amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
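
/*
 * Typical userspace access, assuming the device is DRM minor 0 (the minor
 * number is illustrative):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_pm_info
 */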