/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

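/*
 * Print a human readable description of the power state classification
 * flags from the ATOM powerplay table: the UI class plus any internal
 * class bits set in class/class2.
 */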
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
        const char *s;

        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                s = "none";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                s = "battery";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                s = "balanced";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                s = "performance";
                break;
        }
        printk("\tui class: %s\n", s);
        printk("\tinternal class:");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                pr_cont(" none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        pr_cont(" boot");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        pr_cont(" thermal");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        pr_cont(" limited_pwr");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        pr_cont(" rest");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        pr_cont(" forced");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        pr_cont(" 3d_perf");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        pr_cont(" ovrdrv");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        pr_cont(" uvd");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        pr_cont(" 3d_low");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        pr_cont(" acpi");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        pr_cont(" uvd_hd2");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        pr_cont(" uvd_hd");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        pr_cont(" uvd_sd");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        pr_cont(" limited_pwr2");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        pr_cont(" ulv");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        pr_cont(" uvd_mvc");
        }
        pr_cont("\n");
}

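/* Print the platform capability flags of a power state. */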
void amdgpu_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps:");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                pr_cont(" single_disp");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                pr_cont(" video");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                pr_cont(" no_dc");
        pr_cont("\n");
}

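/*
 * Print which of the driver's bookkeeping slots (current, requested,
 * boot) the given power state currently occupies.
 */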
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
                                struct amdgpu_ps *rps)
{
        printk("\tstatus:");
        if (rps == adev->pm.dpm.current_ps)
                pr_cont(" c");
        if (rps == adev->pm.dpm.requested_ps)
                pr_cont(" r");
        if (rps == adev->pm.dpm.boot_ps)
                pr_cont(" b");
        pr_cont("\n");
}

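/*
 * Rebuild the bitmask and count of enabled CRTCs so the DPM code can
 * pick a power state appropriate for the current display load.
 */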
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        adev->pm.dpm.new_active_crtcs = 0;
        adev->pm.dpm.new_active_crtc_count = 0;
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                    &ddev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (amdgpu_crtc->enabled) {
                                adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                adev->pm.dpm.new_active_crtc_count++;
                        }
                }
        }
}


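/*
 * Return the vblank interval of the first enabled CRTC in microseconds:
 * vblank_in_pixels * 1000 / hw_mode.clock, with the mode clock in kHz.
 * Returns 0xffffffff when no display is active.
 */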
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vblank_in_pixels =
                                        amdgpu_crtc->hw_mode.crtc_htotal *
                                        (amdgpu_crtc->hw_mode.crtc_vblank_end -
                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                        (amdgpu_crtc->v_border * 2));

                                vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
                                break;
                        }
                }
        }

        return vblank_time_us;
}

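/* Return the vertical refresh rate of the first enabled CRTC, or 0. */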
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vrefresh = 0;

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
                                break;
                        }
                }
        }

        return vrefresh;
}

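/*
 * Report whether the given thermal sensor type is handled entirely by
 * the GPU's internal thermal controller, as opposed to an external or
 * hybrid chip that needs special handling.
 */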
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false; /* need special handling */
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

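/*
 * The ATOM powerplay and fan tables come in several revisions; these
 * unions let the parser address the same BIOS blob through whichever
 * layout matches the table revision.
 */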
union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
        struct _ATOM_PPLIB_FANTABLE3 fan3;
};

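/*
 * Copy an ATOM clock/voltage dependency table into the driver's native
 * format. Clocks are stored in the BIOS as a 16-bit low word plus an
 * 8-bit high byte and are recombined here. Returns -ENOMEM on
 * allocation failure.
 */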
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
                                              ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        u32 size = atom_table->ucNumEntries *
                sizeof(struct amdgpu_clock_voltage_dependency_entry);
        int i;
        ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

        amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
        if (!amdgpu_table->entries)
                return -ENOMEM;

        entry = &atom_table->entries[0];
        for (i = 0; i < atom_table->ucNumEntries; i++) {
                amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                        (entry->ucClockHigh << 16);
                amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
                entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                        ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
        }
        amdgpu_table->count = atom_table->ucNumEntries;

        return 0;
}

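/*
 * Cache the platform capability flags and the backbias/voltage
 * response times from the PowerPlayInfo table. Returns -EINVAL if the
 * table header cannot be parsed.
 */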
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

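/*
 * Parse the optional sub-tables of the PowerPlayInfo table (fan table,
 * clock/voltage dependency tables, phase shedding limits, CAC data and
 * the extended header's VCE/UVD/SAMU/ACP/PPM/PowerTune tables) into
 * adev->pm.dpm. Previously parsed tables are freed on error.
 */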
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        /* fan table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                adev->pm.dpm.fan.t_max = 10900;
                        adev->pm.dpm.fan.cycle_delay = 100000;
                        if (fan_info->fan.ucFanTableFormat >= 3) {
                                adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
                                adev->pm.dpm.fan.default_max_fan_pwm =
                                        le16_to_cpu(fan_info->fan3.usFanPWMMax);
                                adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
                                adev->pm.dpm.fan.fan_output_sensitivity =
                                        le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
                        }
                        adev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

        /* clock dependency tables, phase shedding tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kcalloc(psl->ucNumEntries,
                                        sizeof(struct amdgpu_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }

                        entry = &psl->entries[0];
                        for (i = 0; i < psl->ucNumEntries; i++) {
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
                        }
                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

        /* cac data */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
                adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (adev->pm.dpm.tdp_od_limit)
                        adev->pm.dpm.power_control = true;
                else
                        adev->pm.dpm.power_control = false;
                adev->pm.dpm.tdp_adjustment = 0;
                adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        ATOM_PPLIB_CAC_Leakage_Record *entry;
                        u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
                        adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        entry = &cac_table->entries[0];
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
                                                le16_to_cpu(entry->usVddc1);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
                                                le16_to_cpu(entry->usVddc2);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
                                                le16_to_cpu(entry->usVddc3);
                                } else {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                                le16_to_cpu(entry->usVddc);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                                le32_to_cpu(entry->ulLeakageValue);
                                }
                                entry = (ATOM_PPLIB_CAC_Leakage_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
                        }
                        adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

        /* ext tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
                        ext_hdr->usVCETableOffset) {
                        VCEClockInfoArray *array = (VCEClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + array->ucNumEntries * sizeof(VCEClockInfo));
                        ATOM_PPLIB_VCE_State_Table *states =
                                (ATOM_PPLIB_VCE_State_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
                                 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
                        ATOM_PPLIB_VCE_State_Record *state_entry;
                        VCEClockInfo *vce_clk;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        state_entry = &states->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
                        }
                        adev->pm.dpm.num_of_vce_states =
                                        states->numEntries > AMD_MAX_VCE_LEVELS ?
                                        AMD_MAX_VCE_LEVELS : states->numEntries;
                        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.vce_states[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.vce_states[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.vce_states[i].clk_idx =
                                        state_entry->ucClockInfoIndex & 0x3f;
                                adev->pm.dpm.vce_states[i].pstate =
                                        (state_entry->ucClockInfoIndex & 0xc0) >> 6;
                                state_entry = (ATOM_PPLIB_VCE_State_Record *)
                                        ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
                        ext_hdr->usUVDTableOffset) {
                        UVDClockInfoArray *array = (UVDClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                UVDClockInfo *uvd_clk = (UVDClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
                                        le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
                        ext_hdr->usSAMUTableOffset) {
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        adev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.ppm_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        adev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
                        ext_hdr->usACPTableOffset) {
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
                        ext_hdr->usPowerTuneTableOffset) {
                        u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                        ATOM_PowerTune_Table *pt;
                        adev->pm.dpm.dyn_state.cac_tdp_table =
                                kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        if (rev > 0) {
                                ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
                                        ppt->usMaximumPowerDeliveryLimit;
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
                                pt = &ppt->power_tune_table;
                        }
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
                                le16_to_cpu(pt->usConfigurableTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
                        adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
                                le16_to_cpu(pt->usBatteryPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
                                le16_to_cpu(pt->usSmallPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
                                le16_to_cpu(pt->usLowCACLeakage);
                        adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
                                le16_to_cpu(pt->usHighCACLeakage);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
                                ext_hdr->usSclkVddgfxTableOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(
                                        &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
                                        dep_table);
                        if (ret) {
                                kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
                                return ret;
                        }
                }
        }

        return 0;
}

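/* Free everything allocated by amdgpu_parse_extended_power_table(). */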
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

        kfree(dyn_state->vddc_dependency_on_sclk.entries);
        kfree(dyn_state->vddci_dependency_on_mclk.entries);
        kfree(dyn_state->vddc_dependency_on_mclk.entries);
        kfree(dyn_state->mvdd_dependency_on_mclk.entries);
        kfree(dyn_state->cac_leakage_table.entries);
        kfree(dyn_state->phase_shedding_limits_table.entries);
        kfree(dyn_state->ppm_table);
        kfree(dyn_state->cac_tdp_table);
        kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
        kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
        kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
        kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
        kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

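/* Indexed by the ATOM_PP_THERMALCONTROLLER_* type from the BIOS. */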
static const char *pp_lib_thermal_controller_names[] = {
        "NONE",
        "lm63",
        "adm1032",
        "adm1030",
        "max6649",
        "lm64",
        "f75375",
        "RV6xx",
        "RV770",
        "adt7473",
        "NONE",
        "External GPIO",
        "Evergreen",
        "emc2103",
        "Sumo",
        "Northern Islands",
        "Southern Islands",
        "lm96163",
        "Sea Islands",
        "Kaveri/Kabini",
};

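/*
 * Read the thermal controller description from the powerplay table,
 * record the fan characteristics and internal sensor type, and for
 * external I2C controllers register the matching i2c client device.
 */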
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        ATOM_PPLIB_POWERPLAYTABLE *power_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        ATOM_PPLIB_THERMALCONTROLLER *controller;
        struct amdgpu_i2c_bus_rec i2c_bus;
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return;
        power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
                (mode_info->atom_context->bios + data_offset);
        controller = &power_table->sThermalController;

        /* add the i2c bus for thermal/fan chip */
        if (controller->ucType > 0) {
                if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
                        adev->pm.no_fan = true;
                adev->pm.fan_pulses_per_revolution =
                        controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
                if (adev->pm.fan_pulses_per_revolution) {
                        adev->pm.fan_min_rpm = controller->ucFanMinRPM;
                        adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
                }
                if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_NI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_CI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_KV;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
                        DRM_INFO("External GPIO thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
                        DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
                        DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
                        adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
                        if (adev->pm.i2c_bus) {
                                struct i2c_board_info info = { };
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
                                i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
                                 controller->ucType,
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                }
        }
}

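/*
 * Resolve the PCIe gen to use: an explicit asic_gen is returned as-is;
 * otherwise default_gen is used when the system link-speed mask
 * supports it, falling back to gen1.
 */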
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
                                                 u32 sys_mask,
                                                 enum amdgpu_pcie_gen asic_gen,
                                                 enum amdgpu_pcie_gen default_gen)
{
        switch (asic_gen) {
        case AMDGPU_PCIE_GEN1:
                return AMDGPU_PCIE_GEN1;
        case AMDGPU_PCIE_GEN2:
                return AMDGPU_PCIE_GEN2;
        case AMDGPU_PCIE_GEN3:
                return AMDGPU_PCIE_GEN3;
        default:
                if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
                    (default_gen == AMDGPU_PCIE_GEN3))
                        return AMDGPU_PCIE_GEN3;
                else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
                         (default_gen == AMDGPU_PCIE_GEN2))
                        return AMDGPU_PCIE_GEN2;
                else
                        return AMDGPU_PCIE_GEN1;
        }
        return AMDGPU_PCIE_GEN1;
}

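/* Return the idx-th parsed VCE clock state, or NULL if out of range. */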
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (idx < adev->pm.dpm.num_of_vce_states)
                return &adev->pm.dpm.vce_states[idx];

        return NULL;
}

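/*
 * Return the low or high end of the GFX clock DPM range, going through
 * the SW SMU when present (which reports MHz, hence the * 100 to get
 * 10 kHz units) and the powerplay get_sclk callback otherwise.
 * amdgpu_dpm_get_mclk() below is the memory-clock (SMU_UCLK) analogue.
 */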
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        uint32_t clk_freq;
        int ret = 0;
        if (is_support_sw_smu(adev)) {
                ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
                                             low ? &clk_freq : NULL,
                                             !low ? &clk_freq : NULL);
                if (ret)
                        return 0;
                return clk_freq * 100;

        } else {
                return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
        }
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        uint32_t clk_freq;
        int ret = 0;
        if (is_support_sw_smu(adev)) {
                ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
                                             low ? &clk_freq : NULL,
                                             !low ? &clk_freq : NULL);
                if (ret)
                        return 0;
                return clk_freq * 100;

        } else {
                return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
        }
}

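/*
 * Gate or ungate an IP block through the SMU. Which backend handles
 * the request (SW SMU or the powerplay set_powergating_by_smu hook)
 * depends on the block type and platform support; see the locking note
 * below for why only the UVD/VCE paths take adev->pm.mutex here.
 */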
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
        int ret = 0;
        bool swsmu = is_support_sw_smu(adev);

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
                if (swsmu) {
                        ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
                } else if (adev->powerplay.pp_funcs &&
                           adev->powerplay.pp_funcs->set_powergating_by_smu) {
                        /*
                         * TODO: need a better lock mechanism
                         *
                         * Here adev->pm.mutex lock protection is enforced
                         * only on the UVD and VCE cases, since the other
                         * cases may already have lock protection in
                         * amdgpu_pm.c. This is a quick fix for the deadlock
                         * issue below.
965                          *     INFO: task ocltst:2028 blocked for more than 120 seconds.
966                          *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
967                          *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
968                          *     ocltst         D    0  2028   2026 0x00000000
969                          *     Call Trace:
970                          *     __schedule+0x2c0/0x870
971                          *     schedule+0x2c/0x70
972                          *     schedule_preempt_disabled+0xe/0x10
973                          *     __mutex_lock.isra.9+0x26d/0x4e0
974                          *     __mutex_lock_slowpath+0x13/0x20
975                          *     ? __mutex_lock_slowpath+0x13/0x20
976                          *     mutex_lock+0x2f/0x40
977                          *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
978                          *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
979                          *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
980                          *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
981                          *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
982                          *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
983                          */
984                         mutex_lock(&adev->pm.mutex);
985                         ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
986                                 adev->powerplay.pp_handle, block_type, gate);
987                         mutex_unlock(&adev->pm.mutex);
988                 }
989                 break;
990         case AMD_IP_BLOCK_TYPE_GFX:
991         case AMD_IP_BLOCK_TYPE_VCN:
992         case AMD_IP_BLOCK_TYPE_SDMA:
993                 if (swsmu)
994                         ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
995                 else if (adev->powerplay.pp_funcs &&
996                          adev->powerplay.pp_funcs->set_powergating_by_smu)
997                         ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
998                                 adev->powerplay.pp_handle, block_type, gate);
999                 break;
1000         case AMD_IP_BLOCK_TYPE_JPEG:
1001                 if (swsmu)
1002                         ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
1003                 break;
1004         case AMD_IP_BLOCK_TYPE_GMC:
1005         case AMD_IP_BLOCK_TYPE_ACP:
1006                 if (adev->powerplay.pp_funcs &&
1007                     adev->powerplay.pp_funcs->set_powergating_by_smu)
1008                         ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
1009                                 adev->powerplay.pp_handle, block_type, gate);
1010                 break;
1011         default:
1012                 break;
1013         }
1014
1015         return ret;
1016 }
1017
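/**
 * amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu device
 *
 * Powers the chip down while keeping its bus interface alive, via either
 * the sw SMU or the powerplay set_asic_baco_state callback.  Paired with
 * amdgpu_dpm_baco_exit() below.  Returns -ENOENT if the legacy path lacks
 * the callback.
 */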
1018 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
1019 {
1020         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1021         void *pp_handle = adev->powerplay.pp_handle;
1022         struct smu_context *smu = &adev->smu;
1023         int ret = 0;
1024
1025         if (is_support_sw_smu(adev)) {
1026                 ret = smu_baco_enter(smu);
1027         } else {
1028                 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1029                         return -ENOENT;
1030
1031                 /* enter BACO state */
1032                 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1033         }
1034
1035         return ret;
1036 }
1037
1038 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1039 {
1040         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1041         void *pp_handle = adev->powerplay.pp_handle;
1042         struct smu_context *smu = &adev->smu;
1043         int ret = 0;
1044
1045         if (is_support_sw_smu(adev)) {
1046                 ret = smu_baco_exit(smu);
1047         } else {
1048                 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1049                         return -ENOENT;
1050
1051                 /* exit BACO state */
1052                 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1053         }
1054
1055         return ret;
1056 }
1057
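/**
 * amdgpu_dpm_set_mp1_state - notify the SMU management processor (MP1)
 * @adev: amdgpu device
 * @mp1_state: a PP_MP1_STATE_* value, e.g. PP_MP1_STATE_UNLOAD ahead of
 *             driver teardown; the precise firmware behaviour for each
 *             state is ASIC-specific
 */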
1058 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1059                              enum pp_mp1_state mp1_state)
1060 {
1061         int ret = 0;
1062
1063         if (is_support_sw_smu(adev)) {
1064                 ret = smu_set_mp1_state(&adev->smu, mp1_state);
1065         } else if (adev->powerplay.pp_funcs &&
1066                    adev->powerplay.pp_funcs->set_mp1_state) {
1067                 ret = adev->powerplay.pp_funcs->set_mp1_state(
1068                                 adev->powerplay.pp_handle,
1069                                 mp1_state);
1070         }
1071
1072         return ret;
1073 }
1074
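/**
 * amdgpu_dpm_is_baco_supported - query BACO capability
 * @adev: amdgpu device
 *
 * Returns true only when the firmware interface reports BACO support; any
 * error in the capability query is folded into "false" so the result can
 * be used directly as a gate.
 */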
1075 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1076 {
1077         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1078         void *pp_handle = adev->powerplay.pp_handle;
1079         struct smu_context *smu = &adev->smu;
1080         bool baco_cap;
1081
1082         if (is_support_sw_smu(adev)) {
1083                 return smu_baco_is_support(smu);
1084         } else {
1085                 if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1086                         return false;
1087
1088                 if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1089                         return false;
1090
1091                 return baco_cap;
1092         }
1093 }
1094
1095 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1096 {
1097         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1098         void *pp_handle = adev->powerplay.pp_handle;
1099         struct smu_context *smu = &adev->smu;
1100
1101         if (is_support_sw_smu(adev)) {
1102                 return smu_mode2_reset(smu);
1103         } else {
1104                 if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1105                         return -ENOENT;
1106
1107                 return pp_funcs->asic_reset_mode_2(pp_handle);
1108         }
1109 }
1110
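/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu device
 *
 * An illustrative caller sketch (hypothetical, for documentation only):
 *
 *	if (amdgpu_dpm_is_baco_supported(adev)) {
 *		r = amdgpu_dpm_baco_reset(adev);
 *		if (r)
 *			dev_err(adev->dev, "BACO reset failed (%d)\n", r);
 *	}
 */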
1111 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1112 {
1113         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1114         void *pp_handle = adev->powerplay.pp_handle;
1115         struct smu_context *smu = &adev->smu;
1116         int ret = 0;
1117
1118         if (is_support_sw_smu(adev)) {
1119                 ret = smu_baco_enter(smu);
1120                 if (ret)
1121                         return ret;
1122
1123                 ret = smu_baco_exit(smu);
1124                 if (ret)
1125                         return ret;
1126         } else {
1127                 if (!pp_funcs ||
1128                     !pp_funcs->set_asic_baco_state)
1129                         return -ENOENT;
1130
1131                 /* enter BACO state */
1132                 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1133                 if (ret)
1134                         return ret;
1135
1136                 /* exit BACO state */
1137                 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1138                 if (ret)
1139                         return ret;
1140         }
1141
1142         return 0;
1143 }
1144
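/*
 * Mode1 reset is only wired up through the sw SMU: on legacy powerplay
 * parts amdgpu_dpm_is_mode1_reset_supported() reports false and
 * amdgpu_dpm_mode1_reset() fails with -EOPNOTSUPP, so callers should
 * probe before resetting:
 *
 *	if (amdgpu_dpm_is_mode1_reset_supported(adev))
 *		r = amdgpu_dpm_mode1_reset(adev);
 */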
1145 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1146 {
1147         struct smu_context *smu = &adev->smu;
1148
1149         if (is_support_sw_smu(adev))
1150                 return smu_mode1_reset_is_support(smu);
1151
1152         return false;
1153 }
1154
1155 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1156 {
1157         struct smu_context *smu = &adev->smu;
1158
1159         if (is_support_sw_smu(adev))
1160                 return smu_mode1_reset(smu);
1161
1162         return -EOPNOTSUPP;
1163 }
1164
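/**
 * amdgpu_dpm_switch_power_profile - enable or disable a workload profile
 * @adev: amdgpu device
 * @type: PP_SMC_POWER_PROFILE_* workload hint
 * @en: true to select the profile, false to drop it
 *
 * Deliberately a no-op (returning 0) under SR-IOV virtualization, where
 * the host owns power management.
 */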
1165 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1166                                     enum PP_SMC_POWER_PROFILE type,
1167                                     bool en)
1168 {
1169         int ret = 0;
1170
1171         if (amdgpu_sriov_vf(adev))
1172                 return 0;
1173
1174         if (is_support_sw_smu(adev))
1175                 ret = smu_switch_power_profile(&adev->smu, type, en);
1176         else if (adev->powerplay.pp_funcs &&
1177                  adev->powerplay.pp_funcs->switch_power_profile)
1178                 ret = adev->powerplay.pp_funcs->switch_power_profile(
1179                         adev->powerplay.pp_handle, type, en);
1180
1181         return ret;
1182 }
1183
1184 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1185                                uint32_t pstate)
1186 {
1187         int ret = 0;
1188
1189         if (is_support_sw_smu(adev))
1190                 ret = smu_set_xgmi_pstate(&adev->smu, pstate);
1191         else if (adev->powerplay.pp_funcs &&
1192                  adev->powerplay.pp_funcs->set_xgmi_pstate)
1193                 ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1194                                                                 pstate);
1195
1196         return ret;
1197 }
1198
1199 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1200                              uint32_t cstate)
1201 {
1202         int ret = 0;
1203         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1204         void *pp_handle = adev->powerplay.pp_handle;
1205         struct smu_context *smu = &adev->smu;
1206
1207         if (is_support_sw_smu(adev))
1208                 ret = smu_set_df_cstate(smu, cstate);
1209         else if (pp_funcs &&
1210                  pp_funcs->set_df_cstate)
1211                 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1212
1213         return ret;
1214 }
1215
1216 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1217 {
1218         struct smu_context *smu = &adev->smu;
1219
1220         if (is_support_sw_smu(adev))
1221                 return smu_allow_xgmi_power_down(smu, en);
1222
1223         return 0;
1224 }
1225
1226 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1227 {
1228         void *pp_handle = adev->powerplay.pp_handle;
1229         const struct amd_pm_funcs *pp_funcs =
1230                         adev->powerplay.pp_funcs;
1231         struct smu_context *smu = &adev->smu;
1232         int ret = 0;
1233
1234         if (is_support_sw_smu(adev))
1235                 ret = smu_enable_mgpu_fan_boost(smu);
1236         else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1237                 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1238
1239         return ret;
1240 }
1241
1242 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1243                                       uint32_t msg_id)
1244 {
1245         void *pp_handle = adev->powerplay.pp_handle;
1246         const struct amd_pm_funcs *pp_funcs =
1247                         adev->powerplay.pp_funcs;
1248         int ret = 0;
1249
1250         if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1251                 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1252                                                        msg_id);
1253
1254         return ret;
1255 }
1256
1257 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1258                                   bool acquire)
1259 {
1260         void *pp_handle = adev->powerplay.pp_handle;
1261         const struct amd_pm_funcs *pp_funcs =
1262                         adev->powerplay.pp_funcs;
1263         int ret = -EOPNOTSUPP;
1264
1265         if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1266                 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1267                                                    acquire);
1268
1269         return ret;
1270 }
1271
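/**
 * amdgpu_pm_acpi_event_handler - react to AC/DC power source changes
 * @adev: amdgpu device
 *
 * Called on ACPI power-source events.  Re-samples whether the system is
 * on AC power, mirrors the result into adev->pm.ac_power, and lets the
 * firmware adjust (BAPM on legacy parts, smu_set_ac_dc() on sw SMU parts).
 */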
1272 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1273 {
1274         if (adev->pm.dpm_enabled) {
1275                 mutex_lock(&adev->pm.mutex);
1276                 if (power_supply_is_system_supplied() > 0)
1277                         adev->pm.ac_power = true;
1278                 else
1279                         adev->pm.ac_power = false;
1280                 if (adev->powerplay.pp_funcs &&
1281                     adev->powerplay.pp_funcs->enable_bapm)
1282                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1283                 mutex_unlock(&adev->pm.mutex);
1284
1285                 if (is_support_sw_smu(adev))
1286                         smu_set_ac_dc(&adev->smu);
1287         }
1288 }
1289
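/**
 * amdgpu_dpm_read_sensor - read one value from the GPU sensor interface
 * @adev: amdgpu device
 * @sensor: AMDGPU_PP_SENSOR_* selector
 * @data: buffer receiving the value
 * @size: in: size of @data in bytes, out: bytes actually written
 *
 * Returns 0 on success or a negative error code.  See
 * amdgpu_dpm_thermal_work_handler() below for a typical caller reading
 * AMDGPU_PP_SENSOR_GPU_TEMP.
 */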
1290 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1291                            void *data, uint32_t *size)
1292 {
1293         int ret = 0;
1294
1295         if (!data || !size)
1296                 return -EINVAL;
1297
1298         if (is_support_sw_smu(adev))
1299                 ret = smu_read_sensor(&adev->smu, sensor, data, size);
1300         else {
1301                 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
1302                         ret = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle,
1303                                                                     sensor, data, size);
1304                 else
1305                         ret = -EINVAL;
1306         }
1307
1308         return ret;
1309 }
1310
1311 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1312 {
1313         struct amdgpu_device *adev =
1314                 container_of(work, struct amdgpu_device,
1315                              pm.dpm.thermal.work);
1316         /* switch to the thermal state */
1317         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1318         int temp, size = sizeof(temp);
1319
1320         if (!adev->pm.dpm_enabled)
1321                 return;
1322
1323         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1324                                     (void *)&temp, &size)) {
1325                 if (temp < adev->pm.dpm.thermal.min_temp)
1326                         /* switch back the user state */
1327                         dpm_state = adev->pm.dpm.user_state;
1328         } else {
1329                 if (adev->pm.dpm.thermal.high_to_low)
1330                         /* switch back the user state */
1331                         dpm_state = adev->pm.dpm.user_state;
1332         }
1333         mutex_lock(&adev->pm.mutex);
1334         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1335                 adev->pm.dpm.thermal_active = true;
1336         else
1337                 adev->pm.dpm.thermal_active = false;
1338         adev->pm.dpm.state = dpm_state;
1339         mutex_unlock(&adev->pm.mutex);
1340
1341         amdgpu_pm_compute_clocks(adev);
1342 }
1343
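/*
 * Select the best matching vbios power state for the requested dpm state.
 * User states match on UI class (honouring ATOM_PPLIB_SINGLE_DISPLAY_ONLY
 * caps), internal states match on classification flags, and unmatched
 * requests are degraded step by step (e.g. THERMAL -> ACPI -> BATTERY ->
 * PERFORMANCE) via the restart_search loop until something fits.
 */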
1344 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1345                                                      enum amd_pm_state_type dpm_state)
1346 {
1347         int i;
1348         struct amdgpu_ps *ps;
1349         u32 ui_class;
1350         bool single_display =
1351                 (adev->pm.dpm.new_active_crtc_count < 2);
1352
1353         /* check if the vblank period is too short to adjust the mclk */
1354         if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1355                 if (amdgpu_dpm_vblank_too_short(adev))
1356                         single_display = false;
1357         }
1358
1359         /* certain older asics have a separate 3D performance state,
1360          * so try that first if the user selected performance
1361          */
1362         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1363                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1364         /* balanced states don't exist at the moment */
1365         if (dpm_state == POWER_STATE_TYPE_BALANCED)
1366                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1367
1368 restart_search:
1369         /* Pick the best power state based on current conditions */
1370         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1371                 ps = &adev->pm.dpm.ps[i];
1372                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1373                 switch (dpm_state) {
1374                 /* user states */
1375                 case POWER_STATE_TYPE_BATTERY:
1376                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1377                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1378                                         if (single_display)
1379                                                 return ps;
1380                                 } else
1381                                         return ps;
1382                         }
1383                         break;
1384                 case POWER_STATE_TYPE_BALANCED:
1385                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1386                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1387                                         if (single_display)
1388                                                 return ps;
1389                                 } else
1390                                         return ps;
1391                         }
1392                         break;
1393                 case POWER_STATE_TYPE_PERFORMANCE:
1394                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1395                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1396                                         if (single_display)
1397                                                 return ps;
1398                                 } else
1399                                         return ps;
1400                         }
1401                         break;
1402                 /* internal states */
1403                 case POWER_STATE_TYPE_INTERNAL_UVD:
1404                         if (adev->pm.dpm.uvd_ps)
1405                                 return adev->pm.dpm.uvd_ps;
1406                         else
1407                                 break;
1408                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1409                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1410                                 return ps;
1411                         break;
1412                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1413                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1414                                 return ps;
1415                         break;
1416                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1417                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1418                                 return ps;
1419                         break;
1420                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1421                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1422                                 return ps;
1423                         break;
1424                 case POWER_STATE_TYPE_INTERNAL_BOOT:
1425                         return adev->pm.dpm.boot_ps;
1426                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
1427                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1428                                 return ps;
1429                         break;
1430                 case POWER_STATE_TYPE_INTERNAL_ACPI:
1431                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1432                                 return ps;
1433                         break;
1434                 case POWER_STATE_TYPE_INTERNAL_ULV:
1435                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1436                                 return ps;
1437                         break;
1438                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1439                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1440                                 return ps;
1441                         break;
1442                 default:
1443                         break;
1444                 }
1445         }
1446         /* use a fallback state if we didn't match */
1447         switch (dpm_state) {
1448         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1449                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1450                 goto restart_search;
1451         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1452         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1453         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1454                 if (adev->pm.dpm.uvd_ps) {
1455                         return adev->pm.dpm.uvd_ps;
1456                 } else {
1457                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1458                         goto restart_search;
1459                 }
1460         case POWER_STATE_TYPE_INTERNAL_THERMAL:
1461                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1462                 goto restart_search;
1463         case POWER_STATE_TYPE_INTERNAL_ACPI:
1464                 dpm_state = POWER_STATE_TYPE_BATTERY;
1465                 goto restart_search;
1466         case POWER_STATE_TYPE_BATTERY:
1467         case POWER_STATE_TYPE_BALANCED:
1468         case POWER_STATE_TYPE_INTERNAL_3DPERF:
1469                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1470                 goto restart_search;
1471         default:
1472                 break;
1473         }
1474
1475         return NULL;
1476 }
1477
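/*
 * Re-evaluate and program the ASIC power state.  Must be called with
 * adev->pm.mutex held (hence the _locked suffix); see
 * amdgpu_pm_compute_clocks() below for the canonical caller.
 */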
1478 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1479 {
1480         struct amdgpu_ps *ps;
1481         enum amd_pm_state_type dpm_state;
1482         int ret;
1483         bool equal = false;
1484
1485         /* if dpm init failed */
1486         if (!adev->pm.dpm_enabled)
1487                 return;
1488
1489         if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1490                 /* add other state override checks here */
1491                 if ((!adev->pm.dpm.thermal_active) &&
1492                     (!adev->pm.dpm.uvd_active))
1493                         adev->pm.dpm.state = adev->pm.dpm.user_state;
1494         }
1495         dpm_state = adev->pm.dpm.state;
1496
1497         ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1498         if (ps)
1499                 adev->pm.dpm.requested_ps = ps;
1500         else
1501                 return;
1502
1503         if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1504                 printk("switching from power state:\n");
1505                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1506                 printk("switching to power state:\n");
1507                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1508         }
1509
1510         /* update whether vce is active */
1511         ps->vce_active = adev->pm.dpm.vce_active;
1512         if (adev->powerplay.pp_funcs->display_configuration_changed)
1513                 amdgpu_dpm_display_configuration_changed(adev);
1514
1515         ret = amdgpu_dpm_pre_set_power_state(adev);
1516         if (ret)
1517                 return;
1518
1519         if (adev->powerplay.pp_funcs->check_state_equal) {
1520                 if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
1521                         equal = false;
1522         }
1523
1524         if (equal)
1525                 return;
1526
1527         amdgpu_dpm_set_power_state(adev);
1528         amdgpu_dpm_post_set_power_state(adev);
1529
1530         adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1531         adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1532
1533         if (adev->powerplay.pp_funcs->force_performance_level) {
1534                 if (adev->pm.dpm.thermal_active) {
1535                         enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1536                         /* force low perf level for thermal */
1537                         amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1538                         /* save the user's level */
1539                         adev->pm.dpm.forced_level = level;
1540                 } else {
1541                         /* otherwise, user selected level */
1542                         amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1543                 }
1544         }
1545 }
1546
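/**
 * amdgpu_pm_compute_clocks - re-evaluate clocks after a configuration change
 * @adev: amdgpu device
 *
 * Updates display bandwidth, waits for the rings' outstanding fences so
 * that reclocking is safe, then kicks the appropriate backend: a display
 * config task for sw SMU and powerplay, or a direct locked state change
 * for the legacy dpm path.
 */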
1547 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1548 {
1549         int i = 0;
1550
1551         if (!adev->pm.dpm_enabled)
1552                 return;
1553
1554         if (adev->mode_info.num_crtc)
1555                 amdgpu_display_bandwidth_update(adev);
1556
1557         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1558                 struct amdgpu_ring *ring = adev->rings[i];
1559                 if (ring && ring->sched.ready)
1560                         amdgpu_fence_wait_empty(ring);
1561         }
1562
1563         if (is_support_sw_smu(adev)) {
1564                 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
1565                 smu_handle_task(&adev->smu,
1566                                 smu_dpm->dpm_level,
1567                                 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1568                                 true);
1569         } else {
1570                 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1571                         if (!amdgpu_device_has_dc_support(adev)) {
1572                                 mutex_lock(&adev->pm.mutex);
1573                                 amdgpu_dpm_get_active_displays(adev);
1574                                 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1575                                 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1576                                 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1577                                 /* we have issues with mclk switching with refresh rates over 120 Hz on the non-DC code. */
1578                                 if (adev->pm.pm_display_cfg.vrefresh > 120)
1579                                         adev->pm.pm_display_cfg.min_vblank_time = 0;
1580                                 if (adev->powerplay.pp_funcs->display_configuration_change)
1581                                         adev->powerplay.pp_funcs->display_configuration_change(
1582                                                                         adev->powerplay.pp_handle,
1583                                                                         &adev->pm.pm_display_cfg);
1584                                 mutex_unlock(&adev->pm.mutex);
1585                         }
1586                         amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1587                 } else {
1588                         mutex_lock(&adev->pm.mutex);
1589                         amdgpu_dpm_get_active_displays(adev);
1590                         amdgpu_dpm_change_power_state_locked(adev);
1591                         mutex_unlock(&adev->pm.mutex);
1592                 }
1593         }
1594 }
1595
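/*
 * amdgpu_dpm_enable_uvd - power UVD up or down for decode work.  Note the
 * polarity: the SMU interface gates a block, so enabling UVD passes
 * gate = !enable.  SI parts predate SMU powergating and instead select
 * the internal UVD power state; amdgpu_dpm_enable_vce() below follows
 * the same pattern for VCE.
 */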
1596 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1597 {
1598         int ret = 0;
1599
1600         if (adev->family == AMDGPU_FAMILY_SI) {
1601                 mutex_lock(&adev->pm.mutex);
1602                 if (enable) {
1603                         adev->pm.dpm.uvd_active = true;
1604                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1605                 } else {
1606                         adev->pm.dpm.uvd_active = false;
1607                 }
1608                 mutex_unlock(&adev->pm.mutex);
1609
1610                 amdgpu_pm_compute_clocks(adev);
1611         } else {
1612                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1613                 if (ret)
1614                         DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
1615                                   enable ? "enable" : "disable", ret);
1616
1617                 /* enable/disable Low Memory PState for UVD (4k videos) */
1618                 if (adev->asic_type == CHIP_STONEY &&
1619                         adev->uvd.decode_image_width >= WIDTH_4K) {
1620                         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1621
1622                         if (hwmgr && hwmgr->hwmgr_func &&
1623                             hwmgr->hwmgr_func->update_nbdpm_pstate)
1624                                 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
1625                                                                        !enable,
1626                                                                        true);
1627                 }
1628         }
1629 }
1630
1631 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1632 {
1633         int ret = 0;
1634
1635         if (adev->family == AMDGPU_FAMILY_SI) {
1636                 mutex_lock(&adev->pm.mutex);
1637                 if (enable) {
1638                         adev->pm.dpm.vce_active = true;
1639                         /* XXX select vce level based on ring/task */
1640                         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1641                 } else {
1642                         adev->pm.dpm.vce_active = false;
1643                 }
1644                 mutex_unlock(&adev->pm.mutex);
1645
1646                 amdgpu_pm_compute_clocks(adev);
1647         } else {
1648                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1649                 if (ret)
1650                         DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
1651                                   enable ? "enable" : "disable", ret);
1652         }
1653 }
1654
1655 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1656 {
1657         int i;
1658
1659         if (!adev->powerplay.pp_funcs->print_power_state)
1660                 return;
1661
1662         for (i = 0; i < adev->pm.dpm.num_ps; i++)
1663                 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1664
1665 }
1666
1667 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1668 {
1669         int ret = 0;
1670
1671         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1672         if (ret)
1673                 DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
1674                           enable ? "enable" : "disable", ret);
1675 }
1676
1677 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1678 {
1679         int r;
1680
1681         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1682                 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1683                 if (r) {
1684                         pr_err("smu firmware loading failed\n");
1685                         return r;
1686                 }
1687                 *smu_version = adev->pm.fw_version;
1688         }
1689         return 0;
1690 }