/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

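/* Decode the ATOM power state classification flags into a dmesg dump. */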
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
        const char *s;

        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                s = "none";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                s = "battery";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                s = "balanced";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                s = "performance";
                break;
        }
        printk("\tui class: %s\n", s);
        printk("\tinternal class:");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                pr_cont(" none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        pr_cont(" boot");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        pr_cont(" thermal");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        pr_cont(" limited_pwr");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        pr_cont(" rest");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        pr_cont(" forced");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        pr_cont(" 3d_perf");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        pr_cont(" ovrdrv");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        pr_cont(" uvd");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        pr_cont(" 3d_low");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        pr_cont(" acpi");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        pr_cont(" uvd_hd2");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        pr_cont(" uvd_hd");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        pr_cont(" uvd_sd");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        pr_cont(" limited_pwr2");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        pr_cont(" ulv");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        pr_cont(" uvd_mvc");
        }
        pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps:");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                pr_cont(" single_disp");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                pr_cont(" video");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                pr_cont(" no_dc");
        pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
                                struct amdgpu_ps *rps)
{
        printk("\tstatus:");
        if (rps == adev->pm.dpm.current_ps)
                pr_cont(" c");
        if (rps == adev->pm.dpm.requested_ps)
                pr_cont(" r");
        if (rps == adev->pm.dpm.boot_ps)
                pr_cont(" b");
        pr_cont("\n");
}

void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        adev->pm.dpm.new_active_crtcs = 0;
        adev->pm.dpm.new_active_crtc_count = 0;
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                    &ddev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (amdgpu_crtc->enabled) {
                                adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                adev->pm.dpm.new_active_crtc_count++;
                        }
                }
        }
}

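/*
 * Return the vblank interval of the first active CRTC, in microseconds.
 * With hw_mode.clock in kHz, the computation below amounts to:
 *
 *   vblank_in_pixels = crtc_htotal *
 *                      (crtc_vblank_end - crtc_vdisplay + 2 * v_border)
 *   vblank_time_us   = vblank_in_pixels * 1000 / clock
 *
 * When no display is active, 0xffffffff is returned so callers treat
 * the vblank window as effectively unlimited.
 */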
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vblank_in_pixels =
                                        amdgpu_crtc->hw_mode.crtc_htotal *
                                        (amdgpu_crtc->hw_mode.crtc_vblank_end -
                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                        (amdgpu_crtc->v_border * 2));

                                vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
                                break;
                        }
                }
        }

        return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vrefresh = 0;

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
                                break;
                        }
                }
        }

        return vrefresh;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false; /* need special handling */
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
        struct _ATOM_PPLIB_FANTABLE3 fan3;
};

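/*
 * Convert an ATOM clock/voltage dependency table into the driver's
 * native layout.  Clocks are stored in the ATOM records as a
 * little-endian 16-bit low word plus an 8-bit high byte, which are
 * stitched together here.  The allocated entries array is released
 * later by amdgpu_free_extended_power_table().
 */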
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
                                              ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        u32 size = atom_table->ucNumEntries *
                sizeof(struct amdgpu_clock_voltage_dependency_entry);
        int i;
        ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

        amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
        if (!amdgpu_table->entries)
                return -ENOMEM;

        entry = &atom_table->entries[0];
        for (i = 0; i < atom_table->ucNumEntries; i++) {
                amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                        (entry->ucClockHigh << 16);
                amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
                entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                        ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
        }
        amdgpu_table->count = atom_table->ucNumEntries;

        return 0;
}

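/*
 * Cache the platform capability flags plus the backbias and voltage
 * response times from the PowerPlayInfo ATOM data table.
 */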
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

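/*
 * Parse the optional PPLIB sub-tables (fan, clock/voltage dependencies,
 * phase shedding limits, CAC leakage, VCE/UVD/SAMU/ACP limits, PPM and
 * PowerTune data) out of the PowerPlayInfo ATOM table.  All sub-table
 * offsets are relative to the start of the PowerPlayInfo table, and the
 * extra "+ 1" byte offsets below step over a single-byte field that
 * precedes each clock-info array.  Which sub-tables are present is
 * gated on usTableSize and on the extended header size/offset fields.
 * On allocation failure, the tables parsed so far are released via
 * amdgpu_free_extended_power_table().
 */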
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        /* fan table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                adev->pm.dpm.fan.t_max = 10900;
                        adev->pm.dpm.fan.cycle_delay = 100000;
                        if (fan_info->fan.ucFanTableFormat >= 3) {
                                adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
                                adev->pm.dpm.fan.default_max_fan_pwm =
                                        le16_to_cpu(fan_info->fan3.usFanPWMMax);
                                adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
                                adev->pm.dpm.fan.fan_output_sensitivity =
                                        le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
                        }
                        adev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

        /* clock dependency tables, phase shedding tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kcalloc(psl->ucNumEntries,
                                        sizeof(struct amdgpu_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }

                        entry = &psl->entries[0];
                        for (i = 0; i < psl->ucNumEntries; i++) {
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
                        }
                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

        /* cac data */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
                adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (adev->pm.dpm.tdp_od_limit)
                        adev->pm.dpm.power_control = true;
                else
                        adev->pm.dpm.power_control = false;
                adev->pm.dpm.tdp_adjustment = 0;
                adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        ATOM_PPLIB_CAC_Leakage_Record *entry;
                        u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

                        adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        entry = &cac_table->entries[0];
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
                                                le16_to_cpu(entry->usVddc1);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
                                                le16_to_cpu(entry->usVddc2);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
                                                le16_to_cpu(entry->usVddc3);
                                } else {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                                le16_to_cpu(entry->usVddc);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                                le32_to_cpu(entry->ulLeakageValue);
                                }
                                entry = (ATOM_PPLIB_CAC_Leakage_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
                        }
                        adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

        /* ext tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
                        ext_hdr->usVCETableOffset) {
                        VCEClockInfoArray *array = (VCEClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + array->ucNumEntries * sizeof(VCEClockInfo));
                        ATOM_PPLIB_VCE_State_Table *states =
                                (ATOM_PPLIB_VCE_State_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
                                 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
                        ATOM_PPLIB_VCE_State_Record *state_entry;
                        VCEClockInfo *vce_clk;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        state_entry = &states->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
                        }
                        adev->pm.dpm.num_of_vce_states =
                                        states->numEntries > AMD_MAX_VCE_LEVELS ?
                                        AMD_MAX_VCE_LEVELS : states->numEntries;
                        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.vce_states[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.vce_states[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.vce_states[i].clk_idx =
                                        state_entry->ucClockInfoIndex & 0x3f;
                                adev->pm.dpm.vce_states[i].pstate =
                                        (state_entry->ucClockInfoIndex & 0xc0) >> 6;
                                state_entry = (ATOM_PPLIB_VCE_State_Record *)
                                        ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
                        ext_hdr->usUVDTableOffset) {
                        UVDClockInfoArray *array = (UVDClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                UVDClockInfo *uvd_clk = (UVDClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
                                        le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
                        ext_hdr->usSAMUTableOffset) {
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        adev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.ppm_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        adev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
                        ext_hdr->usACPTableOffset) {
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
                        ext_hdr->usPowerTuneTableOffset) {
                        u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                        ATOM_PowerTune_Table *pt;

                        adev->pm.dpm.dyn_state.cac_tdp_table =
                                kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        if (rev > 0) {
                                ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
                                        ppt->usMaximumPowerDeliveryLimit;
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
                                pt = &ppt->power_tune_table;
                        }
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
                                le16_to_cpu(pt->usConfigurableTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
                        adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
                                le16_to_cpu(pt->usBatteryPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
                                le16_to_cpu(pt->usSmallPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
                                le16_to_cpu(pt->usLowCACLeakage);
                        adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
                                le16_to_cpu(pt->usHighCACLeakage);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
                                ext_hdr->usSclkVddgfxTableOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(
                                        &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
                                        dep_table);
                        if (ret) {
                                kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
                                return ret;
                        }
                }
        }

        return 0;
}

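/*
 * Release everything allocated by amdgpu_parse_extended_power_table().
 * kfree(NULL) is a no-op, so this is safe after a partial parse.
 */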
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

        kfree(dyn_state->vddc_dependency_on_sclk.entries);
        kfree(dyn_state->vddci_dependency_on_mclk.entries);
        kfree(dyn_state->vddc_dependency_on_mclk.entries);
        kfree(dyn_state->mvdd_dependency_on_mclk.entries);
        kfree(dyn_state->cac_leakage_table.entries);
        kfree(dyn_state->phase_shedding_limits_table.entries);
        kfree(dyn_state->ppm_table);
        kfree(dyn_state->cac_tdp_table);
        kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
        kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
        kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
        kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
        kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
        "NONE",
        "lm63",
        "adm1032",
        "adm1030",
        "max6649",
        "lm64",
        "f75375",
        "RV6xx",
        "RV770",
        "adt7473",
        "NONE",
        "External GPIO",
        "Evergreen",
        "emc2103",
        "Sumo",
        "Northern Islands",
        "Southern Islands",
        "lm96163",
        "Sea Islands",
        "Kaveri/Kabini",
};

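/*
 * Read the thermal controller description out of the PowerPlayTable and
 * record the fan characteristics.  For ASICs with an internal sensor
 * only the thermal type is noted; for recognized external controllers
 * the matching i2c bus is looked up and an i2c device is registered so
 * a hwmon driver can bind to the chip.
 */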
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        ATOM_PPLIB_POWERPLAYTABLE *power_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        ATOM_PPLIB_THERMALCONTROLLER *controller;
        struct amdgpu_i2c_bus_rec i2c_bus;
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return;
        power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
                (mode_info->atom_context->bios + data_offset);
        controller = &power_table->sThermalController;

        /* add the i2c bus for thermal/fan chip */
        if (controller->ucType > 0) {
                if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
                        adev->pm.no_fan = true;
                adev->pm.fan_pulses_per_revolution =
                        controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
                if (adev->pm.fan_pulses_per_revolution) {
                        adev->pm.fan_min_rpm = controller->ucFanMinRPM;
                        adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
                }
                if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_NI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_CI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_KV;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
                        DRM_INFO("External GPIO thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
                        DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
                        DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
                        adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
                        if (adev->pm.i2c_bus) {
                                struct i2c_board_info info = { };
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];

                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
                                i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
                                 controller->ucType,
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                }
        }
}

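/*
 * Resolve the PCIe gen to use for DPM: an explicit asic_gen request is
 * honored as-is; otherwise the requested default_gen is granted only
 * when the system link (sys_mask) supports it, falling back to gen1.
 */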
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
                                                 u32 sys_mask,
                                                 enum amdgpu_pcie_gen asic_gen,
                                                 enum amdgpu_pcie_gen default_gen)
{
        switch (asic_gen) {
        case AMDGPU_PCIE_GEN1:
                return AMDGPU_PCIE_GEN1;
        case AMDGPU_PCIE_GEN2:
                return AMDGPU_PCIE_GEN2;
        case AMDGPU_PCIE_GEN3:
                return AMDGPU_PCIE_GEN3;
        default:
                if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
                    (default_gen == AMDGPU_PCIE_GEN3))
                        return AMDGPU_PCIE_GEN3;
                else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
                         (default_gen == AMDGPU_PCIE_GEN2))
                        return AMDGPU_PCIE_GEN2;
                else
                        return AMDGPU_PCIE_GEN1;
        }
        return AMDGPU_PCIE_GEN1;
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (idx < adev->pm.dpm.num_of_vce_states)
                return &adev->pm.dpm.vce_states[idx];

        return NULL;
}

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
}

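/*
 * Gate or ungate an IP block through the SMU.  The last programmed
 * state is cached in adev->pm.pwr_state, so a repeated request for the
 * state a block is already in returns early as a no-op.
 */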
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

        if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
                dev_dbg(adev->dev, "IP block%d already in the target %s state!",
                                block_type, gate ? "gate" : "ungate");
                return 0;
        }

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
                if (pp_funcs && pp_funcs->set_powergating_by_smu) {
                        /*
                         * TODO: need a better lock mechanism
                         *
                         * Here adev->pm.mutex lock protection is enforced only
                         * on the UVD and VCE cases, since the other cases may
                         * already be protected by locks in amdgpu_pm.c.
                         * This is a quick fix for the deadlock issue below.
                         *     INFO: task ocltst:2028 blocked for more than 120 seconds.
                         *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
                         *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
                         *     ocltst          D    0  2028   2026 0x00000000
                         *     Call Trace:
954                          *     __schedule+0x2c0/0x870
955                          *     schedule+0x2c/0x70
956                          *     schedule_preempt_disabled+0xe/0x10
957                          *     __mutex_lock.isra.9+0x26d/0x4e0
958                          *     __mutex_lock_slowpath+0x13/0x20
959                          *     ? __mutex_lock_slowpath+0x13/0x20
960                          *     mutex_lock+0x2f/0x40
961                          *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
962                          *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
963                          *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
964                          *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
965                          *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
966                          *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
967                          */
968                         mutex_lock(&adev->pm.mutex);
969                         ret = pp_funcs->set_powergating_by_smu(
970                                 adev->powerplay.pp_handle, block_type, gate);
971                         mutex_unlock(&adev->pm.mutex);
972                 }
973                 break;
974         case AMD_IP_BLOCK_TYPE_GFX:
975         case AMD_IP_BLOCK_TYPE_VCN:
976         case AMD_IP_BLOCK_TYPE_SDMA:
977         case AMD_IP_BLOCK_TYPE_JPEG:
978         case AMD_IP_BLOCK_TYPE_GMC:
979         case AMD_IP_BLOCK_TYPE_ACP:
980                 if (pp_funcs && pp_funcs->set_powergating_by_smu) {
981                         ret = pp_funcs->set_powergating_by_smu(
982                                 adev->powerplay.pp_handle, block_type, gate);
983                 }
984                 break;
985         default:
986                 break;
987         }
988
989         if (!ret)
990                 atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
991
992         return ret;
993 }
994
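/**
 * amdgpu_dpm_baco_enter - put the ASIC into the BACO state
 * @adev: amdgpu device pointer
 *
 * BACO ("Bus Active, Chip Off") powers down the chip while keeping its
 * PCIe link alive, which is what runtime suspend and the BACO-based
 * reset below build on. Returns 0 on success, -ENOENT when the backend
 * has no set_asic_baco_state callback.
 */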
995 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
996 {
997         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
998         void *pp_handle = adev->powerplay.pp_handle;
999         int ret = 0;
1000
1001         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1002                 return -ENOENT;
1003
1004         /* enter BACO state */
1005         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1006
1007         return ret;
1008 }
1009
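/**
 * amdgpu_dpm_baco_exit - bring the ASIC back out of the BACO state
 * @adev: amdgpu device pointer
 *
 * Mirror of amdgpu_dpm_baco_enter(); same return convention.
 */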
1010 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1011 {
1012         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1013         void *pp_handle = adev->powerplay.pp_handle;
1014         int ret = 0;
1015
1016         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1017                 return -ENOENT;
1018
1019         /* exit BACO state */
1020         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1021
1022         return ret;
1023 }
1024
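/*
 * Tell the MP1 (the SMU's management processor) which state to move to,
 * e.g. PP_MP1_STATE_UNLOAD before driver unload or PP_MP1_STATE_RESET
 * around a GPU reset, so the firmware quiesces before the hand-off.
 */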
1025 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1026                              enum pp_mp1_state mp1_state)
1027 {
1028         int ret = 0;
1029         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1030
1031         if (pp_funcs && pp_funcs->set_mp1_state) {
1032                 ret = pp_funcs->set_mp1_state(
1033                                 adev->powerplay.pp_handle,
1034                                 mp1_state);
1035         }
1036
1037         return ret;
1038 }
1039
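/*
 * Whether the ASIC can enter BACO at all. Returns false both when BACO
 * is genuinely unsupported and when the capability query itself fails,
 * so callers always get a plain yes/no answer.
 */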
1040 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1041 {
1042         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1043         void *pp_handle = adev->powerplay.pp_handle;
1044         bool baco_cap;
1045
1046         if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1047                 return false;
1048
1049         if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1050                 return false;
1051
1052         return baco_cap;
1053 }
1054
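/*
 * Mode 2 reset is a lighter-weight ASIC reset than a full mode 1 reset;
 * exactly which parts of the chip it spares is ASIC-specific and left
 * to the backend. -ENOENT means no such reset is wired up.
 */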
1055 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1056 {
1057         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1058         void *pp_handle = adev->powerplay.pp_handle;
1059
1060         if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1061                 return -ENOENT;
1062
1063         return pp_funcs->asic_reset_mode_2(pp_handle);
1064 }
1065
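/*
 * Reset the ASIC by cycling it through BACO: entering and immediately
 * leaving the state power-cycles the chip while the PCIe link stays up.
 */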
1066 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1067 {
1068         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1069         void *pp_handle = adev->powerplay.pp_handle;
1070         int ret = 0;
1071
1072         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1073                 return -ENOENT;
1074
1075         /* enter BACO state */
1076         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1077         if (ret)
1078                 return ret;
1079
1080         /* exit BACO state */
1081         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1082         if (ret)
1083                 return ret;
1084
1085         return 0;
1086 }
1087
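/*
 * Mode 1 reset (a full ASIC-wide reset) is only reachable through the
 * SW SMU path here; on other backends the support query reports false
 * and the reset itself returns -EOPNOTSUPP.
 */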
1088 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1089 {
1090         struct smu_context *smu = &adev->smu;
1091
1092         if (is_support_sw_smu(adev))
1093                 return smu_mode1_reset_is_support(smu);
1094
1095         return false;
1096 }
1097
1098 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1099 {
1100         struct smu_context *smu = &adev->smu;
1101
1102         if (is_support_sw_smu(adev))
1103                 return smu_mode1_reset(smu);
1104
1105         return -EOPNOTSUPP;
1106 }
1107
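/*
 * Enable or disable one of the PP_SMC_POWER_PROFILE_* workload hints
 * (fullscreen 3D, video, VR, compute, ...). Under SR-IOV this silently
 * succeeds, presumably because the host, not the guest, owns power
 * management there.
 */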
1108 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1109                                     enum PP_SMC_POWER_PROFILE type,
1110                                     bool en)
1111 {
1112         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1113         int ret = 0;
1114
1115         if (amdgpu_sriov_vf(adev))
1116                 return 0;
1117
1118         if (pp_funcs && pp_funcs->switch_power_profile)
1119                 ret = pp_funcs->switch_power_profile(
1120                         adev->powerplay.pp_handle, type, en);
1121
1122         return ret;
1123 }
1124
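/*
 * Request a p-state for the XGMI links, the high-speed GPU-to-GPU
 * interconnect used when several GPUs form a hive.
 */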
1125 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1126                                uint32_t pstate)
1127 {
1128         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1129         int ret = 0;
1130
1131         if (pp_funcs && pp_funcs->set_xgmi_pstate)
1132                 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1133                                                 pstate);
1134
1135         return ret;
1136 }
1137
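/*
 * Allow or disallow data-fabric C-states (deep sleep of the DF) via the
 * pp_df_cstate values, typically to keep the fabric awake while traffic
 * that depends on it is still in flight.
 */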
1138 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1139                              uint32_t cstate)
1140 {
1141         int ret = 0;
1142         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1143         void *pp_handle = adev->powerplay.pp_handle;
1144
1145         if (pp_funcs && pp_funcs->set_df_cstate)
1146                 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1147
1148         return ret;
1149 }
1150
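/*
 * Allow or forbid powering down the XGMI links; only implemented on the
 * SW SMU path, everything else reports success without doing anything.
 */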
1151 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1152 {
1153         struct smu_context *smu = &adev->smu;
1154
1155         if (is_support_sw_smu(adev))
1156                 return smu_allow_xgmi_power_down(smu, en);
1157
1158         return 0;
1159 }
1160
1161 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1162 {
1163         void *pp_handle = adev->powerplay.pp_handle;
1164         const struct amd_pm_funcs *pp_funcs =
1165                         adev->powerplay.pp_funcs;
1166         int ret = 0;
1167
1168         if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1169                 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1170
1171         return ret;
1172 }
1173
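/*
 * Forward one clock-gating request to the SMU. @msg_id is a packed
 * group/block/state code, typically built with the PP_CG_MSG_ID()
 * encoding by the IP-specific set_clockgating_state handlers.
 */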
1174 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1175                                       uint32_t msg_id)
1176 {
1177         void *pp_handle = adev->powerplay.pp_handle;
1178         const struct amd_pm_funcs *pp_funcs =
1179                         adev->powerplay.pp_funcs;
1180         int ret = 0;
1181
1182         if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1183                 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1184                                                        msg_id);
1185
1186         return ret;
1187 }
1188
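/*
 * Acquire or release the I2C bus owned by the SMU, presumably so the
 * driver and the firmware never drive the bus at the same time (used,
 * for example, for EEPROM access on ASICs where the SMU owns that bus).
 */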
1189 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1190                                   bool acquire)
1191 {
1192         void *pp_handle = adev->powerplay.pp_handle;
1193         const struct amd_pm_funcs *pp_funcs =
1194                         adev->powerplay.pp_funcs;
1195         int ret = -EOPNOTSUPP;
1196
1197         if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1198                 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1199                                                    acquire);
1200
1201         return ret;
1202 }
1203
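/*
 * ACPI power-source (AC/DC) event hook: refresh adev->pm.ac_power and
 * notify both the legacy BAPM hook and the SW SMU, since power limits
 * typically differ between battery and mains operation.
 */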
1204 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1205 {
1206         if (adev->pm.dpm_enabled) {
1207                 mutex_lock(&adev->pm.mutex);
1208                 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
1212                 if (adev->powerplay.pp_funcs &&
1213                     adev->powerplay.pp_funcs->enable_bapm)
1214                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1215                 mutex_unlock(&adev->pm.mutex);
1216
1217                 if (is_support_sw_smu(adev))
1218                         smu_set_ac_dc(&adev->smu);
1219         }
1220 }
1221
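/**
 * amdgpu_dpm_read_sensor - sample one amd_pp_sensors value
 * @adev: amdgpu device pointer
 * @sensor: which sensor to read (temperature, clocks, power, ...)
 * @data: output buffer
 * @size: in: buffer size in bytes; out: bytes actually written
 *
 * amdgpu_dpm_thermal_work_handler() below is a typical caller: it
 * passes a stack int and its size to fetch AMDGPU_PP_SENSOR_GPU_TEMP.
 */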
1222 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1223                            void *data, uint32_t *size)
1224 {
1225         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1226         int ret = 0;
1227
1228         if (!data || !size)
1229                 return -EINVAL;
1230
1231         if (pp_funcs && pp_funcs->read_sensor)
1232                 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
1233                                             sensor, data, size);
1234         else
1235                 ret = -EINVAL;
1236
1237         return ret;
1238 }
1239
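/*
 * Bottom half for the DPM thermal interrupt: sample the GPU temperature
 * and either force the internal thermal state or restore the user's
 * state, then let amdgpu_pm_compute_clocks() apply the decision.
 */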
1240 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1241 {
1242         struct amdgpu_device *adev =
1243                 container_of(work, struct amdgpu_device,
1244                              pm.dpm.thermal.work);
1245         /* switch to the thermal state */
1246         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1247         int temp, size = sizeof(temp);
1248
1249         if (!adev->pm.dpm_enabled)
1250                 return;
1251
1252         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1253                                     (void *)&temp, &size)) {
1254                 if (temp < adev->pm.dpm.thermal.min_temp)
1255                         /* switch back the user state */
1256                         dpm_state = adev->pm.dpm.user_state;
1257         } else {
1258                 if (adev->pm.dpm.thermal.high_to_low)
1259                         /* switch back the user state */
1260                         dpm_state = adev->pm.dpm.user_state;
1261         }
1262         mutex_lock(&adev->pm.mutex);
1263         adev->pm.dpm.thermal_active = (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL);
1267         adev->pm.dpm.state = dpm_state;
1268         mutex_unlock(&adev->pm.mutex);
1269
1270         amdgpu_pm_compute_clocks(adev);
1271 }
1272
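/*
 * Pick the best matching entry from the ps[] table for the requested
 * dpm_state. States flagged single-display-only are skipped when more
 * than one CRTC is active or the vblank period is too short for mclk
 * switching, and an unmatched request falls through a chain of
 * progressively more generic states until something matches.
 */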
1273 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1274                                                      enum amd_pm_state_type dpm_state)
1275 {
1276         int i;
1277         struct amdgpu_ps *ps;
1278         u32 ui_class;
1279         bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
1281
1282         /* check if the vblank period is too short to adjust the mclk */
1283         if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1284                 if (amdgpu_dpm_vblank_too_short(adev))
1285                         single_display = false;
1286         }
1287
1288         /* certain older asics have a separate 3D performance state,
1289          * so try that first if the user selected performance
1290          */
1291         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1292                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1293         /* balanced states don't exist at the moment */
1294         if (dpm_state == POWER_STATE_TYPE_BALANCED)
1295                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1296
1297 restart_search:
1298         /* Pick the best power state based on current conditions */
1299         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1300                 ps = &adev->pm.dpm.ps[i];
1301                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1302                 switch (dpm_state) {
1303                 /* user states */
1304                 case POWER_STATE_TYPE_BATTERY:
1305                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1306                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1307                                         if (single_display)
1308                                                 return ps;
1309                                 } else
1310                                         return ps;
1311                         }
1312                         break;
1313                 case POWER_STATE_TYPE_BALANCED:
1314                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1315                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1316                                         if (single_display)
1317                                                 return ps;
1318                                 } else
1319                                         return ps;
1320                         }
1321                         break;
1322                 case POWER_STATE_TYPE_PERFORMANCE:
1323                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1324                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1325                                         if (single_display)
1326                                                 return ps;
1327                                 } else
1328                                         return ps;
1329                         }
1330                         break;
1331                 /* internal states */
1332                 case POWER_STATE_TYPE_INTERNAL_UVD:
1333                         if (adev->pm.dpm.uvd_ps)
1334                                 return adev->pm.dpm.uvd_ps;
1335                         else
1336                                 break;
1337                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1338                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1339                                 return ps;
1340                         break;
1341                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1342                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1343                                 return ps;
1344                         break;
1345                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1346                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1347                                 return ps;
1348                         break;
1349                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1350                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1351                                 return ps;
1352                         break;
1353                 case POWER_STATE_TYPE_INTERNAL_BOOT:
1354                         return adev->pm.dpm.boot_ps;
1355                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
1356                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1357                                 return ps;
1358                         break;
1359                 case POWER_STATE_TYPE_INTERNAL_ACPI:
1360                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1361                                 return ps;
1362                         break;
1363                 case POWER_STATE_TYPE_INTERNAL_ULV:
1364                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1365                                 return ps;
1366                         break;
1367                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1368                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1369                                 return ps;
1370                         break;
1371                 default:
1372                         break;
1373                 }
1374         }
1375         /* use a fallback state if we didn't match */
1376         switch (dpm_state) {
1377         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1378                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1379                 goto restart_search;
1380         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1381         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1382         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1383                 if (adev->pm.dpm.uvd_ps) {
1384                         return adev->pm.dpm.uvd_ps;
1385                 } else {
1386                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1387                         goto restart_search;
1388                 }
1389         case POWER_STATE_TYPE_INTERNAL_THERMAL:
1390                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1391                 goto restart_search;
1392         case POWER_STATE_TYPE_INTERNAL_ACPI:
1393                 dpm_state = POWER_STATE_TYPE_BATTERY;
1394                 goto restart_search;
1395         case POWER_STATE_TYPE_BATTERY:
1396         case POWER_STATE_TYPE_BALANCED:
1397         case POWER_STATE_TYPE_INTERNAL_3DPERF:
1398                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1399                 goto restart_search;
1400         default:
1401                 break;
1402         }
1403
1404         return NULL;
1405 }
1406
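/*
 * Core of the legacy DPM state machine; the caller must hold
 * adev->pm.mutex (hence "locked"). Picks the target power state, bails
 * out early when it equals the current one, and otherwise runs the
 * pre/set/post sequence, reapplying any forced performance level at the
 * end (thermal conditions force the low level).
 */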
1407 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1408 {
1409         struct amdgpu_ps *ps;
1410         enum amd_pm_state_type dpm_state;
1411         int ret;
1412         bool equal = false;
1413
1414         /* if dpm init failed */
1415         if (!adev->pm.dpm_enabled)
1416                 return;
1417
1418         if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1419                 /* add other state override checks here */
1420                 if ((!adev->pm.dpm.thermal_active) &&
1421                     (!adev->pm.dpm.uvd_active))
1422                         adev->pm.dpm.state = adev->pm.dpm.user_state;
1423         }
1424         dpm_state = adev->pm.dpm.state;
1425
1426         ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1427         if (!ps)
1428                 return;
1429
1430         adev->pm.dpm.requested_ps = ps;
1431
1432         if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1433                 printk("switching from power state:\n");
1434                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1435                 printk("switching to power state:\n");
1436                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1437         }
1438
1439         /* update whether vce is active */
1440         ps->vce_active = adev->pm.dpm.vce_active;
1441         if (adev->powerplay.pp_funcs->display_configuration_changed)
1442                 amdgpu_dpm_display_configuration_changed(adev);
1443
1444         ret = amdgpu_dpm_pre_set_power_state(adev);
1445         if (ret)
1446                 return;
1447
1448         if (adev->powerplay.pp_funcs->check_state_equal) {
1449                 if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
1450                         equal = false;
1451         }
1452
1453         if (equal)
1454                 return;
1455
1456         amdgpu_dpm_set_power_state(adev);
1457         amdgpu_dpm_post_set_power_state(adev);
1458
1459         adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1460         adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1461
1462         if (adev->powerplay.pp_funcs->force_performance_level) {
1463                 if (adev->pm.dpm.thermal_active) {
1464                         enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1465                         /* force low perf level for thermal */
1466                         amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1467                         /* save the user's level */
1468                         adev->pm.dpm.forced_level = level;
1469                 } else {
1470                         /* otherwise, user selected level */
1471                         amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1472                 }
1473         }
1474 }
1475
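/*
 * Central "something changed, re-evaluate the clocks" entry point:
 * update display bandwidth, wait for the rings to idle, then either
 * hand the new display configuration to the powerplay task dispatcher
 * or, on the legacy path, rerun the power-state selection under
 * adev->pm.mutex.
 */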
1476 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1477 {
1478         int i = 0;
1479
1480         if (!adev->pm.dpm_enabled)
1481                 return;
1482
1483         if (adev->mode_info.num_crtc)
1484                 amdgpu_display_bandwidth_update(adev);
1485
1486         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1487                 struct amdgpu_ring *ring = adev->rings[i];
1488                 if (ring && ring->sched.ready)
1489                         amdgpu_fence_wait_empty(ring);
1490         }
1491
1492         if (adev->powerplay.pp_funcs->dispatch_tasks) {
1493                 if (!amdgpu_device_has_dc_support(adev)) {
1494                         mutex_lock(&adev->pm.mutex);
1495                         amdgpu_dpm_get_active_displays(adev);
1496                         adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1497                         adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1498                         adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1499                         /* we have issues with mclk switching with
1500                          * refresh rates over 120 Hz on the non-DC code.
1501                          */
1502                         if (adev->pm.pm_display_cfg.vrefresh > 120)
1503                                 adev->pm.pm_display_cfg.min_vblank_time = 0;
1504                         if (adev->powerplay.pp_funcs->display_configuration_change)
1505                                 adev->powerplay.pp_funcs->display_configuration_change(
1506                                                         adev->powerplay.pp_handle,
1507                                                         &adev->pm.pm_display_cfg);
1508                         mutex_unlock(&adev->pm.mutex);
1509                 }
1510                 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1511         } else {
1512                 mutex_lock(&adev->pm.mutex);
1513                 amdgpu_dpm_get_active_displays(adev);
1514                 amdgpu_dpm_change_power_state_locked(adev);
1515                 mutex_unlock(&adev->pm.mutex);
1516         }
1517 }
1518
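/*
 * UVD power handling is split: SI parts go through the legacy power
 * state machine above, everything else power-gates the block via the
 * SMU. Stoney additionally toggles the low-memory p-state when decoding
 * 4K content.
 */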
1519 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1520 {
1521         int ret = 0;
1522
1523         if (adev->family == AMDGPU_FAMILY_SI) {
1524                 mutex_lock(&adev->pm.mutex);
1525                 if (enable) {
1526                         adev->pm.dpm.uvd_active = true;
1527                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1528                 } else {
1529                         adev->pm.dpm.uvd_active = false;
1530                 }
1531                 mutex_unlock(&adev->pm.mutex);
1532
1533                 amdgpu_pm_compute_clocks(adev);
1534         } else {
1535                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1536                 if (ret)
1537                         DRM_ERROR("DPM %s UVD failed, ret = %d.\n",
1538                                   enable ? "enable" : "disable", ret);
1539
1540                 /* enable/disable Low Memory PState for UVD (4k videos) */
1541                 if (adev->asic_type == CHIP_STONEY &&
1542                         adev->uvd.decode_image_width >= WIDTH_4K) {
1543                         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1544
1545                         if (hwmgr && hwmgr->hwmgr_func &&
1546                             hwmgr->hwmgr_func->update_nbdpm_pstate)
1547                                 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
1548                                                                        !enable,
1549                                                                        true);
1550                 }
1551         }
1552 }
1553
1554 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1555 {
1556         int ret = 0;
1557
1558         if (adev->family == AMDGPU_FAMILY_SI) {
1559                 mutex_lock(&adev->pm.mutex);
1560                 if (enable) {
1561                         adev->pm.dpm.vce_active = true;
1562                         /* XXX select vce level based on ring/task */
1563                         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1564                 } else {
1565                         adev->pm.dpm.vce_active = false;
1566                 }
1567                 mutex_unlock(&adev->pm.mutex);
1568
1569                 amdgpu_pm_compute_clocks(adev);
1570         } else {
1571                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1572                 if (ret)
1573                         DRM_ERROR("DPM %s VCE failed, ret = %d.\n",
1574                                   enable ? "enable" : "disable", ret);
1575         }
1576 }
1577
1578 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1579 {
1580         int i;
1581
1582         if (adev->powerplay.pp_funcs->print_power_state == NULL)
1583                 return;
1584
1585         for (i = 0; i < adev->pm.dpm.num_ps; i++)
1586                 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1588 }
1589
1590 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1591 {
1592         int ret = 0;
1593
1594         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1595         if (ret)
1596                 DRM_ERROR("DPM %s JPEG failed, ret = %d.\n",
1597                           enable ? "enable" : "disable", ret);
1598 }
1599
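/*
 * Ask powerplay to load the SMU microcode and, on success, report the
 * firmware version back through the optional @smu_version pointer. A
 * backend without a load_firmware callback is treated as "nothing to
 * load" and returns 0.
 */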
1600 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1601 {
1602         int r;
1603
1604         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1605                 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1606                 if (r) {
1607                         pr_err("smu firmware loading failed\n");
1608                         return r;
1609                 }
1610
1611                 if (smu_version)
1612                         *smu_version = adev->pm.fw_version;
1613         }
1614
1615         return 0;
1616 }