/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};

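/* The VBIOS reports the board's memory module index in bits 23:16 of BIOS_SCRATCH_4. */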
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
        return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

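/*
 * Copy the MC arbiter DRAM timing registers and burst time from one
 * arb register set to another, then ask the MC to switch to the
 * destination set via MC_ARB_CG.  Only the F0/F1 sets are handled here.
 */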
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
                                       u32 arb_freq_src, u32 arb_freq_dest)
{
        u32 mc_arb_dram_timing;
        u32 mc_arb_dram_timing2;
        u32 burst_time;
        u32 mc_cg_config;

        switch (arb_freq_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
                         MC_ARB_BURST_TIME__STATE0__SHIFT;
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
                         MC_ARB_BURST_TIME__STATE1__SHIFT;
                break;
        default:
                return -EINVAL;
        }

        switch (arb_freq_dest) {
        case MC_CG_ARB_FREQ_F0:
                WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE0_MASK);
                break;
        case MC_CG_ARB_FREQ_F1:
                WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE1_MASK);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
        WREG32(mmMC_CG_CONFIG, mc_cg_config);
        WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
                ~MC_ARB_CG__CG_ARB_REQ_MASK);

        return 0;
}

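/*
 * Map a memory clock to the MC parameter index used by the MC firmware.
 * Like the rest of the dpm code, memory_clock is in 10 kHz units, so
 * e.g. 10000 corresponds to 100 MHz.
 */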
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
        u8 mc_para_index;

        if (memory_clock < 10000)
                mc_para_index = 0;
        else if (memory_clock >= 80000)
                mc_para_index = 0x0f;
        else
                mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
        return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
        u8 mc_para_index;

        if (strobe_mode) {
                if (memory_clock < 12500)
                        mc_para_index = 0x00;
                else if (memory_clock > 47500)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 10000) / 2500);
        } else {
                if (memory_clock < 65000)
                        mc_para_index = 0x00;
                else if (memory_clock > 135000)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 60000) / 5000);
        }
        return mc_para_index;
}

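/*
 * If the voltage table has more entries than the SMC state table can hold,
 * keep only the last max_voltage_steps entries (the highest voltages,
 * assuming the table is sorted in ascending order).
 */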
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table)
{
        unsigned int i, diff;

        if (voltage_table->count <= max_voltage_steps)
                return;

        diff = voltage_table->count - max_voltage_steps;

        for (i = 0; i < max_voltage_steps; i++)
                voltage_table->entries[i] = voltage_table->entries[i + diff];

        voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = adev->pm.dpm.priv;

        return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
        struct ci_ps *ps = rps->ps_priv;

        return ps;
}

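/*
 * Pick the powertune defaults for this SKU based on the PCI device ID:
 * Bonaire XT, Saturn XT, Hawaii XT or Hawaii Pro.  Unknown IDs fall
 * back to the Bonaire XT values.
 */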
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        switch (adev->pdev->device) {
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67BA:
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67A0:
        case 0x67A1:
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B9:
        case 0x67BE:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                if (adev->asic_type == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}

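/*
 * Convert a VDDC value (in mV) to a VID code.  This matches the SVI2
 * encoding: VID 0 is 1.55 V and each step is 6.25 mV, scaled here by
 * VOLTAGE_SCALE to stay in integer arithmetic.
 */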
static u8 ci_convert_to_vid(u16 vddc)
{
        return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

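/*
 * Translate the CAC leakage table into the hi/lo (and hi2, with EVV)
 * VID tables the SMC uses for BAPM.  The table must match the sclk
 * voltage dependency table in size and may hold at most 8 entries.
 */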
static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }
        return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *vid = pi->smc_powertune_table.VddCVid;
        u32 i;

        if (pi->vddc_voltage_table.count > 8)
                return -EINVAL;

        for (i = 0; i < pi->vddc_voltage_table.count; i++)
                vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

        return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

        pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
        pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
        pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
        pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

        return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        u16 tdc_limit;

        tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
        pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
        pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
                pt_defaults->tdc_vddc_throttle_release_limit_perc;
        pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

        return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                     pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
            (adev->pm.dpm.fan.fan_output_sensitivity == 0))
                adev->pm.dpm.fan.fan_output_sensitivity =
                        adev->pm.dpm.fan.default_fan_output_sensitivity;

        pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
                cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

        return 0;
}

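/*
 * Scan the BAPM hi/lo VID tables for the smallest and largest nonzero
 * VIDs; the SMC needs these as the GnbLPML min/max values.
 */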
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        int i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
        u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;

        hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
        lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

        pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
        pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

        return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
        int i, j, k;
        const u16 *def1;
        const u16 *def2;

        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 pm_fuse_table_offset;
        int ret;

        if (pi->caps_power_containment) {
                ret = amdgpu_ci_read_smc_sram_dword(adev,
                                             SMU7_FIRMWARE_HEADER_LOCATION +
                                             offsetof(SMU7_Firmware_Header, PmFuseTable),
                                             &pm_fuse_table_offset, pi->sram_end);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_vid_sidd(adev);
                if (ret)
                        return ret;
                ret = ci_populate_vddc_vid(adev);
                if (ret)
                        return ret;
                ret = ci_populate_svi_load_line(adev);
                if (ret)
                        return ret;
                ret = ci_populate_tdc_limit(adev);
                if (ret)
                        return ret;
                ret = ci_populate_dw8(adev);
                if (ret)
                        return ret;
                ret = ci_populate_fuzzy_fan(adev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
                if (ret)
                        return ret;
                ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
                                           (u8 *)&pi->smc_powertune_table,
                                           sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
                if (ret)
                        return ret;
        }

        return 0;
}

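/*
 * Toggle the DIDT enable bit for each block (SQ/DB/TD/TCP) whose
 * ramping capability is enabled in the power info.
 */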
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(ixDIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(ixDIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
        }
}

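/*
 * Walk a config-reg table until the 0xFFFFFFFF terminator.  CACHE-type
 * entries only accumulate bits; the accumulated value is OR'ed into the
 * next real register write and then cleared.
 */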
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
                adev->gfx.rlc.funcs->enter_safe_mode(adev);

                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
                                adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }

                ci_do_enable_didt(adev, enable);

                adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }

        return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct amdgpu_cac_tdp_table *cac_tdp_table =
                                                adev->pm.dpm.dyn_state.cac_tdp_table;
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(adev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
                        if (smc_result != PPSMC_Result_OK) {
                                ret = -EINVAL;
                                pi->cac_enabled = false;
                        } else {
                                pi->cac_enabled = true;
                        }
                } else if (pi->cac_enabled) {
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
                                            bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result = PPSMC_Result_OK;

        if (pi->thermal_sclk_dpm_enabled) {
                if (enable)
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
                else
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
        }

        if (smc_result == PPSMC_Result_OK)
                return 0;
        else
                return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        s32 adjust_percent;
        s32 target_tdp;
        int ret = 0;
        bool adjust_polarity = false; /* ??? */

        if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;

                ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
        }

        return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        pi->uvd_power_gated = gate;

        ci_update_uvd_dpm(adev, gate);
}

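/*
 * Memory clock switching must complete within the vblank period to avoid
 * on-screen corruption, so it is disabled when vblank is shorter than the
 * switch time: 450 for GDDR5, 300 otherwise (presumably microseconds, the
 * unit amdgpu_dpm_get_vblank_time() reports).
 */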
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

        return vblank_time < switch_limit;
}

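/*
 * Clamp the requested power state to the current limits: AC/DC clock
 * and voltage caps, display minimum clocks, VCE clock requirements,
 * and force the high mclk level when mclk switching is disabled.
 */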
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        int i;

        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                rps->evclk = 0;
                rps->ecclk = 0;
        }

        if ((adev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(adev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->pm.dpm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (adev->pm.dpm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* XXX validate the min clocks required for display */

        if (disable_mclk_switching) {
                mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
                sclk = adev->pm.pm_display_cfg.min_core_set_clock;

        if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
                mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

        if (rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
                if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
                        mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}

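/*
 * Program the thermal interrupt thresholds.  min_temp/max_temp are in
 * millidegrees C; the DIG_THERM_INTH/INTL register fields take whole
 * degrees, hence the divisions by 1000.
 */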
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT);
        tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
        tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
                ((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
        WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;
        return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
        PPSMC_Result result;

        if (enable) {
                thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                                 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
                        return -EINVAL;
                }
        } else {
                thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                        CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_ctrl_is_in_default_mode) {
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
                        >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                pi->fan_ctrl_default_mode = tmp;
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
                        >> CG_FDO_CTRL2__TMIN__SHIFT;
                pi->t_min = tmp;
                pi->fan_ctrl_is_in_default_mode = false;
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
        tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

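/*
 * Build the SMC fan table: convert the fan curve (t_min/t_med/t_high vs.
 * pwm_min/pwm_med/pwm_high) into the fixed-point slopes the SMC expects
 * and upload it.  On any failure, fall back to manual fan control.
 */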
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
        u32 duty100;
        u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
        u16 fdo_min, slope1, slope2;
        u32 reference_clock, tmp;
        int ret;
        u64 tmp64;

        if (!pi->fan_table_start) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
        do_div(tmp64, 10000);
        fdo_min = (u16)tmp64;

        t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
        t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

        pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
        pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

        slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
        slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

        fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
        fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
        fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

        fan_table.Slope1 = cpu_to_be16(slope1);
        fan_table.Slope2 = cpu_to_be16(slope2);

        fan_table.FdoMin = cpu_to_be16(fdo_min);

        fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

        fan_table.HystUp = cpu_to_be16(1);

        fan_table.HystSlope = cpu_to_be16(1);

        fan_table.TempRespLim = cpu_to_be16(5);

        reference_clock = amdgpu_asic_get_xclk(adev);

        fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
                                               reference_clock) / 1600);

        fan_table.FdoMax = cpu_to_be16((u16)duty100);

        tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
                >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
        fan_table.TempSrc = (uint8_t)tmp;

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                          pi->fan_table_start,
                                          (u8 *)(&fan_table),
                                          sizeof(fan_table),
                                          pi->sram_end);

        if (ret) {
                DRM_ERROR("Failed to load fan table to the SMC.");
                adev->pm.dpm.fan.ucode_fan_control = false;
        }

        return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result ret;

        if (pi->caps_od_fuzzy_fan_control_support) {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_FUZZY);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_MSG_SetFanPwmMax,
                                                               adev->pm.dpm.fan.default_max_fan_pwm);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        } else {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_TABLE);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->fan_is_controlled_by_smc = true;
        return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
        PPSMC_Result ret;
        struct ci_power_info *pi = ci_get_pi(adev);

        ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
        if (ret == PPSMC_Result_OK) {
                pi->fan_is_controlled_by_smc = false;
                return 0;
        } else {
                return -EINVAL;
        }
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 *speed)
{
        u32 duty, duty100;
        u64 tmp64;

        if (adev->pm.no_fan)
                return -ENOENT;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
        duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
                >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)duty * 100;
        do_div(tmp64, duty100);
        *speed = (u32)tmp64;

        if (*speed > 100)
                *speed = 100;

        return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 speed)
{
        u32 tmp;
        u32 duty, duty100;
        u64 tmp64;
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (pi->fan_is_controlled_by_smc)
                return -EINVAL;

        if (speed > 100)
                return -EINVAL;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)speed * duty100;
        do_div(tmp64, 100);
        duty = (u32)tmp64;

        tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
        tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL0, tmp);

        return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
        if (mode) {
                /* stop auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_fan_ctrl_stop_smc_fan_control(adev);
                ci_fan_ctrl_set_static_mode(adev, mode);
        } else {
                /* restart auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_thermal_start_smc_fan_control(adev);
                else
                        ci_fan_ctrl_set_default_mode(adev);
        }
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_is_controlled_by_smc)
                return 0;

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 *speed)
{
        u32 tach_period;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
                >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
        if (tach_period == 0)
                return -ENOENT;

        *speed = 60 * xclk * 10000 / tach_period;

        return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 speed)
{
        u32 tach_period, tmp;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        if ((speed < adev->pm.fan_min_rpm) ||
            (speed > adev->pm.fan_max_rpm))
                return -EINVAL;

        if (adev->pm.dpm.fan.ucode_fan_control)
                ci_fan_ctrl_stop_smc_fan_control(adev);

        tach_period = 60 * xclk * 10000 / (8 * speed);
        tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
        tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1332         WREG32_SMC(ixCG_TACH_CTRL, tmp);
1333
1334         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1335
1336         return 0;
1337 }
1338 #endif
1339
1340 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1341 {
1342         struct ci_power_info *pi = ci_get_pi(adev);
1343         u32 tmp;
1344
1345         if (!pi->fan_ctrl_is_in_default_mode) {
1346                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1347                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1348                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1349
1350                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1351                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1352                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1353                 pi->fan_ctrl_is_in_default_mode = true;
1354         }
1355 }
1356
1357 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1358 {
1359         if (adev->pm.dpm.fan.ucode_fan_control) {
1360                 ci_fan_ctrl_start_smc_fan_control(adev);
1361                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1362         }
1363 }
1364
1365 static void ci_thermal_initialize(struct amdgpu_device *adev)
1366 {
1367         u32 tmp;
1368
1369         if (adev->pm.fan_pulses_per_revolution) {
1370                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1371                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1372                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1373                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1374         }
1375
1376         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1377         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1378         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1379 }
1380
1381 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1382 {
1383         int ret;
1384
1385         ci_thermal_initialize(adev);
1386         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1387         if (ret)
1388                 return ret;
1389         ret = ci_thermal_enable_alert(adev, true);
1390         if (ret)
1391                 return ret;
1392         if (adev->pm.dpm.fan.ucode_fan_control) {
1393                 ret = ci_thermal_setup_fan_table(adev);
1394                 if (ret)
1395                         return ret;
1396                 ci_thermal_start_smc_fan_control(adev);
1397         }
1398
1399         return 0;
1400 }
1401
1402 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1403 {
1404         if (!adev->pm.no_fan)
1405                 ci_fan_ctrl_set_default_mode(adev);
1406 }
1407
1408 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1409                                      u16 reg_offset, u32 *value)
1410 {
1411         struct ci_power_info *pi = ci_get_pi(adev);
1412
1413         return amdgpu_ci_read_smc_sram_dword(adev,
1414                                       pi->soft_regs_start + reg_offset,
1415                                       value, pi->sram_end);
1416 }
1417
1418 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1419                                       u16 reg_offset, u32 value)
1420 {
1421         struct ci_power_info *pi = ci_get_pi(adev);
1422
1423         return amdgpu_ci_write_smc_sram_dword(adev,
1424                                        pi->soft_regs_start + reg_offset,
1425                                        value, pi->sram_end);
1426 }
1427
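/*
 * Static FPS thresholds handed to the SMC when FPS-based power management is
 * enabled; FpsHighT/FpsLowT presumably bound the range (45/30 fps here) the
 * firmware steers toward.  Values are big-endian, as the SMC expects.
 */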
1428 static void ci_init_fps_limits(struct amdgpu_device *adev)
1429 {
1430         struct ci_power_info *pi = ci_get_pi(adev);
1431         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1432
1433         if (pi->caps_fps) {
1434                 u16 tmp;
1435
1436                 tmp = 45;
1437                 table->FpsHighT = cpu_to_be16(tmp);
1438
1439                 tmp = 30;
1440                 table->FpsLowT = cpu_to_be16(tmp);
1441         }
1442 }
1443
1444 static int ci_update_sclk_t(struct amdgpu_device *adev)
1445 {
1446         struct ci_power_info *pi = ci_get_pi(adev);
1447         int ret = 0;
1448         u32 low_sclk_interrupt_t = 0;
1449
1450         if (pi->caps_sclk_throttle_low_notification) {
1451                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1452
1453                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1454                                            pi->dpm_table_start +
1455                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1456                                            (u8 *)&low_sclk_interrupt_t,
1457                                            sizeof(u32), pi->sram_end);
1458
1459         }
1460
1461         return ret;
1462 }
1463
1464 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1465 {
1466         struct ci_power_info *pi = ci_get_pi(adev);
1467         u16 leakage_id, virtual_voltage_id;
1468         u16 vddc, vddci;
1469         int i;
1470
1471         pi->vddc_leakage.count = 0;
1472         pi->vddci_leakage.count = 0;
1473
1474         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1475                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1476                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1477                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1478                                 continue;
1479                         if (vddc != 0 && vddc != virtual_voltage_id) {
1480                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1481                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1482                                 pi->vddc_leakage.count++;
1483                         }
1484                 }
1485         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1486                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1487                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1488                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1489                                                                                      virtual_voltage_id,
1490                                                                                      leakage_id) == 0) {
1491                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1492                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1493                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1494                                         pi->vddc_leakage.count++;
1495                                 }
1496                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1497                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1498                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1499                                         pi->vddci_leakage.count++;
1500                                 }
1501                         }
1502                 }
1503         }
1504 }
1505
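/*
 * Translate the set of active auto-throttle sources into the thermal
 * protection controls in GENERAL_PWRMGT.  Routing the DPM event source
 * itself is still stubbed out (see the #if 0 below), so only the
 * THERMAL_PROTECTION_DIS bit is driven here.
 */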
1506 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1507 {
1508         struct ci_power_info *pi = ci_get_pi(adev);
1509         bool want_thermal_protection;
1510         enum amdgpu_dpm_event_src dpm_event_src;
1511         u32 tmp;
1512
1513         switch (sources) {
1514         case 0:
1515         default:
1516                 want_thermal_protection = false;
1517                 break;
1518         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1519                 want_thermal_protection = true;
1520                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1521                 break;
1522         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1523                 want_thermal_protection = true;
1524                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1525                 break;
1526         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1527               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1528                 want_thermal_protection = true;
1529                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1530                 break;
1531         }
1532
1533         if (want_thermal_protection) {
1534 #if 0
1535                 /* XXX: need to figure out how to handle this properly */
1536                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1537                 tmp &= DPM_EVENT_SRC_MASK;
1538                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1539                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1540 #endif
1541
1542                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1543                 if (pi->thermal_protection)
1544                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1545                 else
1546                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1547                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1548         } else {
1549                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1550                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1551                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1552         }
1553 }
1554
1555 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1556                                            enum amdgpu_dpm_auto_throttle_src source,
1557                                            bool enable)
1558 {
1559         struct ci_power_info *pi = ci_get_pi(adev);
1560
1561         if (enable) {
1562                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1563                         pi->active_auto_throttle_sources |= 1 << source;
1564                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1565                 }
1566         } else {
1567                 if (pi->active_auto_throttle_sources & (1 << source)) {
1568                         pi->active_auto_throttle_sources &= ~(1 << source);
1569                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1570                 }
1571         }
1572 }
1573
1574 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1575 {
1576         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1577                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1578 }
1579
1580 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1581 {
1582         struct ci_power_info *pi = ci_get_pi(adev);
1583         PPSMC_Result smc_result;
1584
1585         if (!pi->need_update_smu7_dpm_table)
1586                 return 0;
1587
1588         if ((!pi->sclk_dpm_key_disabled) &&
1589             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1590                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1591                 if (smc_result != PPSMC_Result_OK)
1592                         return -EINVAL;
1593         }
1594
1595         if ((!pi->mclk_dpm_key_disabled) &&
1596             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1597                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1598                 if (smc_result != PPSMC_Result_OK)
1599                         return -EINVAL;
1600         }
1601
1602         pi->need_update_smu7_dpm_table = 0;
1603         return 0;
1604 }
1605
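/*
 * Enable or disable SCLK and MCLK DPM via SMC messages.  When MCLK DPM comes
 * up, the per-channel CAC blocks (LCAC_MC0/MC1/CPL) are armed in two writes
 * with a short delay in between; the magic values presumably come from the
 * reference programming sequence.
 */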
1606 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1607 {
1608         struct ci_power_info *pi = ci_get_pi(adev);
1609         PPSMC_Result smc_result;
1610
1611         if (enable) {
1612                 if (!pi->sclk_dpm_key_disabled) {
1613                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1614                         if (smc_result != PPSMC_Result_OK)
1615                                 return -EINVAL;
1616                 }
1617
1618                 if (!pi->mclk_dpm_key_disabled) {
1619                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1620                         if (smc_result != PPSMC_Result_OK)
1621                                 return -EINVAL;
1622
1623                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1624                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1625
1626                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1627                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1628                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1629
1630                         udelay(10);
1631
1632                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1633                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1634                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1635                 }
1636         } else {
1637                 if (!pi->sclk_dpm_key_disabled) {
1638                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1639                         if (smc_result != PPSMC_Result_OK)
1640                                 return -EINVAL;
1641                 }
1642
1643                 if (!pi->mclk_dpm_key_disabled) {
1644                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1645                         if (smc_result != PPSMC_Result_OK)
1646                                 return -EINVAL;
1647                 }
1648         }
1649
1650         return 0;
1651 }
1652
1653 static int ci_start_dpm(struct amdgpu_device *adev)
1654 {
1655         struct ci_power_info *pi = ci_get_pi(adev);
1656         PPSMC_Result smc_result;
1657         int ret;
1658         u32 tmp;
1659
1660         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1661         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1662         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1663
1664         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1665         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1666         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1667
1668         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1669
1670         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1671
1672         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1673         if (smc_result != PPSMC_Result_OK)
1674                 return -EINVAL;
1675
1676         ret = ci_enable_sclk_mclk_dpm(adev, true);
1677         if (ret)
1678                 return ret;
1679
1680         if (!pi->pcie_dpm_key_disabled) {
1681                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1682                 if (smc_result != PPSMC_Result_OK)
1683                         return -EINVAL;
1684         }
1685
1686         return 0;
1687 }
1688
1689 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1690 {
1691         struct ci_power_info *pi = ci_get_pi(adev);
1692         PPSMC_Result smc_result;
1693
1694         if (!pi->need_update_smu7_dpm_table)
1695                 return 0;
1696
1697         if ((!pi->sclk_dpm_key_disabled) &&
1698             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1699                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1700                 if (smc_result != PPSMC_Result_OK)
1701                         return -EINVAL;
1702         }
1703
1704         if ((!pi->mclk_dpm_key_disabled) &&
1705             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1706                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1707                 if (smc_result != PPSMC_Result_OK)
1708                         return -EINVAL;
1709         }
1710
1711         return 0;
1712 }
1713
1714 static int ci_stop_dpm(struct amdgpu_device *adev)
1715 {
1716         struct ci_power_info *pi = ci_get_pi(adev);
1717         PPSMC_Result smc_result;
1718         int ret;
1719         u32 tmp;
1720
1721         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1722         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1723         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1724
1725         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1726         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1727         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1728
1729         if (!pi->pcie_dpm_key_disabled) {
1730                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1731                 if (smc_result != PPSMC_Result_OK)
1732                         return -EINVAL;
1733         }
1734
1735         ret = ci_enable_sclk_mclk_dpm(adev, false);
1736         if (ret)
1737                 return ret;
1738
1739         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1740         if (smc_result != PPSMC_Result_OK)
1741                 return -EINVAL;
1742
1743         return 0;
1744 }
1745
1746 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1747 {
1748         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1749
1750         if (enable)
1751                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1752         else
1753                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1754         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1755 }
1756
1757 #if 0
1758 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1759                                         bool ac_power)
1760 {
1761         struct ci_power_info *pi = ci_get_pi(adev);
1762         struct amdgpu_cac_tdp_table *cac_tdp_table =
1763                 adev->pm.dpm.dyn_state.cac_tdp_table;
1764         u32 power_limit;
1765
1766         if (ac_power)
1767                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1768         else
1769                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1770
1771         ci_set_power_limit(adev, power_limit);
1772
1773         if (pi->caps_automatic_dc_transition) {
1774                 if (ac_power)
1775                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1776                 else
1777                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1778         }
1779
1780         return 0;
1781 }
1782 #endif
1783
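/*
 * SMC mailbox helpers: an input parameter is passed by writing SMC_MSG_ARG_0
 * before sending the message, and an output parameter is read back from the
 * same register once the SMC answers PPSMC_Result_OK.
 */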
1784 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1785                                                       PPSMC_Msg msg, u32 parameter)
1786 {
1787         WREG32(mmSMC_MSG_ARG_0, parameter);
1788         return amdgpu_ci_send_msg_to_smc(adev, msg);
1789 }
1790
1791 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1792                                                         PPSMC_Msg msg, u32 *parameter)
1793 {
1794         PPSMC_Result smc_result;
1795
1796         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1797
1798         if ((smc_result == PPSMC_Result_OK) && parameter)
1799                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1800
1801         return smc_result;
1802 }
1803
1804 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1805 {
1806         struct ci_power_info *pi = ci_get_pi(adev);
1807
1808         if (!pi->sclk_dpm_key_disabled) {
1809                 PPSMC_Result smc_result =
1810                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1811                 if (smc_result != PPSMC_Result_OK)
1812                         return -EINVAL;
1813         }
1814
1815         return 0;
1816 }
1817
1818 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1819 {
1820         struct ci_power_info *pi = ci_get_pi(adev);
1821
1822         if (!pi->mclk_dpm_key_disabled) {
1823                 PPSMC_Result smc_result =
1824                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1825                 if (smc_result != PPSMC_Result_OK)
1826                         return -EINVAL;
1827         }
1828
1829         return 0;
1830 }
1831
1832 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1833 {
1834         struct ci_power_info *pi = ci_get_pi(adev);
1835
1836         if (!pi->pcie_dpm_key_disabled) {
1837                 PPSMC_Result smc_result =
1838                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1839                 if (smc_result != PPSMC_Result_OK)
1840                         return -EINVAL;
1841         }
1842
1843         return 0;
1844 }
1845
1846 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1847 {
1848         struct ci_power_info *pi = ci_get_pi(adev);
1849
1850         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1851                 PPSMC_Result smc_result =
1852                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1853                 if (smc_result != PPSMC_Result_OK)
1854                         return -EINVAL;
1855         }
1856
1857         return 0;
1858 }
1859
1860 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1861                                        u32 target_tdp)
1862 {
1863         PPSMC_Result smc_result =
1864                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1865         if (smc_result != PPSMC_Result_OK)
1866                 return -EINVAL;
1867         return 0;
1868 }
1869
1870 #if 0
1871 static int ci_set_boot_state(struct amdgpu_device *adev)
1872 {
1873         return ci_enable_sclk_mclk_dpm(adev, false);
1874 }
1875 #endif
1876
1877 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1878 {
1879         u32 sclk_freq;
1880         PPSMC_Result smc_result =
1881                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1882                                                     PPSMC_MSG_API_GetSclkFrequency,
1883                                                     &sclk_freq);
1884         if (smc_result != PPSMC_Result_OK)
1885                 sclk_freq = 0;
1886
1887         return sclk_freq;
1888 }
1889
1890 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1891 {
1892         u32 mclk_freq;
1893         PPSMC_Result smc_result =
1894                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1895                                                     PPSMC_MSG_API_GetMclkFrequency,
1896                                                     &mclk_freq);
1897         if (smc_result != PPSMC_Result_OK)
1898                 mclk_freq = 0;
1899
1900         return mclk_freq;
1901 }
1902
1903 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1904 {
1905         int i;
1906
1907         amdgpu_ci_program_jump_on_start(adev);
1908         amdgpu_ci_start_smc_clock(adev);
1909         amdgpu_ci_start_smc(adev);
1910         for (i = 0; i < adev->usec_timeout; i++) {
1911                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1912                         break;
1913         }
1914 }
1915
1916 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1917 {
1918         amdgpu_ci_reset_smc(adev);
1919         amdgpu_ci_stop_smc_clock(adev);
1920 }
1921
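/*
 * The SMU7 firmware header at SMU7_FIRMWARE_HEADER_LOCATION in SMC SRAM
 * publishes the offsets of the tables the driver reads and writes later:
 * the DPM table, soft registers, MC register table, fan table and MC arbiter
 * DRAM timing table.
 */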
1922 static int ci_process_firmware_header(struct amdgpu_device *adev)
1923 {
1924         struct ci_power_info *pi = ci_get_pi(adev);
1925         u32 tmp;
1926         int ret;
1927
1928         ret = amdgpu_ci_read_smc_sram_dword(adev,
1929                                      SMU7_FIRMWARE_HEADER_LOCATION +
1930                                      offsetof(SMU7_Firmware_Header, DpmTable),
1931                                      &tmp, pi->sram_end);
1932         if (ret)
1933                 return ret;
1934
1935         pi->dpm_table_start = tmp;
1936
1937         ret = amdgpu_ci_read_smc_sram_dword(adev,
1938                                      SMU7_FIRMWARE_HEADER_LOCATION +
1939                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1940                                      &tmp, pi->sram_end);
1941         if (ret)
1942                 return ret;
1943
1944         pi->soft_regs_start = tmp;
1945
1946         ret = amdgpu_ci_read_smc_sram_dword(adev,
1947                                      SMU7_FIRMWARE_HEADER_LOCATION +
1948                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1949                                      &tmp, pi->sram_end);
1950         if (ret)
1951                 return ret;
1952
1953         pi->mc_reg_table_start = tmp;
1954
1955         ret = amdgpu_ci_read_smc_sram_dword(adev,
1956                                      SMU7_FIRMWARE_HEADER_LOCATION +
1957                                      offsetof(SMU7_Firmware_Header, FanTable),
1958                                      &tmp, pi->sram_end);
1959         if (ret)
1960                 return ret;
1961
1962         pi->fan_table_start = tmp;
1963
1964         ret = amdgpu_ci_read_smc_sram_dword(adev,
1965                                      SMU7_FIRMWARE_HEADER_LOCATION +
1966                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1967                                      &tmp, pi->sram_end);
1968         if (ret)
1969                 return ret;
1970
1971         pi->arb_table_start = tmp;
1972
1973         return 0;
1974 }
1975
1976 static void ci_read_clock_registers(struct amdgpu_device *adev)
1977 {
1978         struct ci_power_info *pi = ci_get_pi(adev);
1979
1980         pi->clock_registers.cg_spll_func_cntl =
1981                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1982         pi->clock_registers.cg_spll_func_cntl_2 =
1983                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1984         pi->clock_registers.cg_spll_func_cntl_3 =
1985                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1986         pi->clock_registers.cg_spll_func_cntl_4 =
1987                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1988         pi->clock_registers.cg_spll_spread_spectrum =
1989                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1990         pi->clock_registers.cg_spll_spread_spectrum_2 =
1991                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1992         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1993         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1994         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1995         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1996         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1997         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1998         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1999         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2000         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2001 }
2002
2003 static void ci_init_sclk_t(struct amdgpu_device *adev)
2004 {
2005         struct ci_power_info *pi = ci_get_pi(adev);
2006
2007         pi->low_sclk_interrupt_t = 0;
2008 }
2009
2010 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2011                                          bool enable)
2012 {
2013         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2014
2015         if (enable)
2016                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2017         else
2018                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2019         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2020 }
2021
2022 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2023 {
2024         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2025
2026         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2027
2028         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2029 }
2030
2031 #if 0
2032 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2033 {
2034
2035         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2036
2037         mdelay(25); /* 25 ms; udelay() is not meant for delays this long */
2038
2039         return 0;
2040 }
2041
2042 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2043 {
2044         int i;
2045
2046         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2047
2048         mdelay(7); /* 7 ms; udelay() is not meant for delays this long */
2049
2050         for (i = 0; i < adev->usec_timeout; i++) {
2051                 if (RREG32(mmSMC_RESP_0) == 1)
2052                         break;
2053                 udelay(1000);
2054         }
2055
2056         return 0;
2057 }
2058 #endif
2059
2060 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2061                                         bool has_display)
2062 {
2063         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2064
2065         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2066 }
2067
2068 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2069                                       bool enable)
2070 {
2071         struct ci_power_info *pi = ci_get_pi(adev);
2072
2073         if (enable) {
2074                 if (pi->caps_sclk_ds) {
2075                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2076                                 return -EINVAL;
2077                 } else {
2078                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2079                                 return -EINVAL;
2080                 }
2081         } else {
2082                 if (pi->caps_sclk_ds) {
2083                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2084                                 return -EINVAL;
2085                 }
2086         }
2087
2088         return 0;
2089 }
2090
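/*
 * Program the display-gap logic the SMC uses to schedule reclocking around
 * vblank: with active CRTCs, switches are gated on vblank (or the watermark),
 * and the pre-vblank time is converted from microseconds into reference-clock
 * counts (ref_clock appears to be in 10 kHz units, hence the / 100 scaling).
 */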
2091 static void ci_program_display_gap(struct amdgpu_device *adev)
2092 {
2093         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2094         u32 pre_vbi_time_in_us;
2095         u32 frame_time_in_us;
2096         u32 ref_clock = adev->clock.spll.reference_freq;
2097         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2098         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2099
2100         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2101         if (adev->pm.dpm.new_active_crtc_count > 0)
2102                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2103         else
2104                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2105         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2106
2107         if (refresh_rate == 0)
2108                 refresh_rate = 60;
2109         if (vblank_time == 0xffffffff)
2110                 vblank_time = 500;
2111         frame_time_in_us = 1000000 / refresh_rate;
2112         pre_vbi_time_in_us =
2113                 frame_time_in_us - 200 - vblank_time;
2114         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2115
2116         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2117         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2118         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2119
2120
2121         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2122
2123 }
2124
2125 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2126 {
2127         struct ci_power_info *pi = ci_get_pi(adev);
2128         u32 tmp;
2129
2130         if (enable) {
2131                 if (pi->caps_sclk_ss_support) {
2132                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2133                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2134                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2135                 }
2136         } else {
2137                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2138                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2139                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2140
2141                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2142                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2143                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2144         }
2145 }
2146
2147 static void ci_program_sstp(struct amdgpu_device *adev)
2148 {
2149         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2150                    ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2151                     (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2152 }
2153
2154 static void ci_enable_display_gap(struct amdgpu_device *adev)
2155 {
2156         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2157
2158         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2159                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2160         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2161                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2162
2163         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2164 }
2165
2166 static void ci_program_vc(struct amdgpu_device *adev)
2167 {
2168         u32 tmp;
2169
2170         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2171         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2172         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2173
2174         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2175         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2176         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2177         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2178         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2179         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2180         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2181         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2182 }
2183
2184 static void ci_clear_vc(struct amdgpu_device *adev)
2185 {
2186         u32 tmp;
2187
2188         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2189         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2190         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2191
2192         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2193         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2194         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2195         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2196         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2197         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2198         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2199         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2200 }
2201
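/*
 * Upload the SMC microcode.  This is skipped when the SMC is already running;
 * otherwise the driver waits for the boot sequence to finish, then loads the
 * ucode with the SMC clock stopped and the SMC held in reset.
 */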
2202 static int ci_upload_firmware(struct amdgpu_device *adev)
2203 {
2204         struct ci_power_info *pi = ci_get_pi(adev);
2205         int i, ret;
2206
2207         if (amdgpu_ci_is_smc_running(adev)) {
2208                 DRM_INFO("smc is running, no need to load smc firmware\n");
2209                 return 0;
2210         }
2211
2212         for (i = 0; i < adev->usec_timeout; i++) {
2213                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2214                         break;
2215         }
2216         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2217
2218         amdgpu_ci_stop_smc_clock(adev);
2219         amdgpu_ci_reset_smc(adev);
2220
2221         ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2222
2223         return ret;
2224
2225 }
2226
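/*
 * SVI2-regulated rails have no GPIO lookup table, so the voltage table is
 * synthesized directly from the clock/voltage dependency table, with no SMIO
 * pattern or phase delay.
 */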
2227 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2228                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2229                                      struct atom_voltage_table *voltage_table)
2230 {
2231         u32 i;
2232
2233         if (voltage_dependency_table == NULL)
2234                 return -EINVAL;
2235
2236         voltage_table->mask_low = 0;
2237         voltage_table->phase_delay = 0;
2238
2239         voltage_table->count = voltage_dependency_table->count;
2240         for (i = 0; i < voltage_table->count; i++) {
2241                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2242                 voltage_table->entries[i].smio_low = 0;
2243         }
2244
2245         return 0;
2246 }
2247
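/*
 * Build the VDDC, VDDCI and MVDD voltage tables from either the VBIOS GPIO
 * lookup tables or the SVI2 dependency tables, then trim each one to the
 * maximum number of levels the SMU7 state table can hold.
 */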
2248 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2249 {
2250         struct ci_power_info *pi = ci_get_pi(adev);
2251         int ret;
2252
2253         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2254                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2255                                                         VOLTAGE_OBJ_GPIO_LUT,
2256                                                         &pi->vddc_voltage_table);
2257                 if (ret)
2258                         return ret;
2259         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2260                 ret = ci_get_svi2_voltage_table(adev,
2261                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2262                                                 &pi->vddc_voltage_table);
2263                 if (ret)
2264                         return ret;
2265         }
2266
2267         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2268                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2269                                                          &pi->vddc_voltage_table);
2270
2271         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2272                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2273                                                         VOLTAGE_OBJ_GPIO_LUT,
2274                                                         &pi->vddci_voltage_table);
2275                 if (ret)
2276                         return ret;
2277         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2278                 ret = ci_get_svi2_voltage_table(adev,
2279                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2280                                                 &pi->vddci_voltage_table);
2281                 if (ret)
2282                         return ret;
2283         }
2284
2285         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2286                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2287                                                          &pi->vddci_voltage_table);
2288
2289         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2290                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2291                                                         VOLTAGE_OBJ_GPIO_LUT,
2292                                                         &pi->mvdd_voltage_table);
2293                 if (ret)
2294                         return ret;
2295         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2296                 ret = ci_get_svi2_voltage_table(adev,
2297                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2298                                                 &pi->mvdd_voltage_table);
2299                 if (ret)
2300                         return ret;
2301         }
2302
2303         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2304                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2305                                                          &pi->mvdd_voltage_table);
2306
2307         return 0;
2308 }
2309
2310 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2311                                           struct atom_voltage_table_entry *voltage_table,
2312                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2313 {
2314         int ret;
2315
2316         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2317                                             &smc_voltage_table->StdVoltageHiSidd,
2318                                             &smc_voltage_table->StdVoltageLoSidd);
2319
2320         if (ret) {
2321                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2322                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2323         }
2324
2325         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2326         smc_voltage_table->StdVoltageHiSidd =
2327                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2328         smc_voltage_table->StdVoltageLoSidd =
2329                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2330 }
2331
2332 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2333                                       SMU7_Discrete_DpmTable *table)
2334 {
2335         struct ci_power_info *pi = ci_get_pi(adev);
2336         unsigned int count;
2337
2338         table->VddcLevelCount = pi->vddc_voltage_table.count;
2339         for (count = 0; count < table->VddcLevelCount; count++) {
2340                 ci_populate_smc_voltage_table(adev,
2341                                               &pi->vddc_voltage_table.entries[count],
2342                                               &table->VddcLevel[count]);
2343
2344                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2345                         table->VddcLevel[count].Smio |=
2346                                 pi->vddc_voltage_table.entries[count].smio_low;
2347                 else
2348                         table->VddcLevel[count].Smio = 0;
2349         }
2350         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2351
2352         return 0;
2353 }
2354
2355 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2356                                        SMU7_Discrete_DpmTable *table)
2357 {
2358         unsigned int count;
2359         struct ci_power_info *pi = ci_get_pi(adev);
2360
2361         table->VddciLevelCount = pi->vddci_voltage_table.count;
2362         for (count = 0; count < table->VddciLevelCount; count++) {
2363                 ci_populate_smc_voltage_table(adev,
2364                                               &pi->vddci_voltage_table.entries[count],
2365                                               &table->VddciLevel[count]);
2366
2367                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2368                         table->VddciLevel[count].Smio |=
2369                                 pi->vddci_voltage_table.entries[count].smio_low;
2370                 else
2371                         table->VddciLevel[count].Smio = 0;
2372         }
2373         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2374
2375         return 0;
2376 }
2377
2378 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2379                                       SMU7_Discrete_DpmTable *table)
2380 {
2381         struct ci_power_info *pi = ci_get_pi(adev);
2382         unsigned int count;
2383
2384         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2385         for (count = 0; count < table->MvddLevelCount; count++) {
2386                 ci_populate_smc_voltage_table(adev,
2387                                               &pi->mvdd_voltage_table.entries[count],
2388                                               &table->MvddLevel[count]);
2389
2390                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2391                         table->MvddLevel[count].Smio |=
2392                                 pi->mvdd_voltage_table.entries[count].smio_low;
2393                 else
2394                         table->MvddLevel[count].Smio = 0;
2395         }
2396         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2397
2398         return 0;
2399 }
2400
2401 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2402                                           SMU7_Discrete_DpmTable *table)
2403 {
2404         int ret;
2405
2406         ret = ci_populate_smc_vddc_table(adev, table);
2407         if (ret)
2408                 return ret;
2409
2410         ret = ci_populate_smc_vddci_table(adev, table);
2411         if (ret)
2412                 return ret;
2413
2414         ret = ci_populate_smc_mvdd_table(adev, table);
2415         if (ret)
2416                 return ret;
2417
2418         return 0;
2419 }
2420
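/*
 * Look up the MVDD level for a given memory clock from the
 * mvdd_dependency_on_mclk table.  A nonzero return tells callers that no
 * MVDD value is available (MVDD control disabled, or the clock is above
 * every table entry).
 */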
2421 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2422                                   SMU7_Discrete_VoltageLevel *voltage)
2423 {
2424         struct ci_power_info *pi = ci_get_pi(adev);
2425         u32 i = 0;
2426
2427         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2428                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2429                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2430                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2431                                 break;
2432                         }
2433                 }
2434
2435                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2436                         return -EINVAL;
2437         }
2438
2439         return (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_NONE) ? -EINVAL : 0;
2440 }
2441
2442 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2443                                          struct atom_voltage_table_entry *voltage_table,
2444                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2445 {
2446         u16 v_index, idx;
2447         bool voltage_found = false;
2448         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2449         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2450
2451         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2452                 return -EINVAL;
2453
2454         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2455                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2456                         if (voltage_table->value ==
2457                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2458                                 voltage_found = true;
2459                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2460                                         idx = v_index;
2461                                 else
2462                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2463                                 *std_voltage_lo_sidd =
2464                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2465                                 *std_voltage_hi_sidd =
2466                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2467                                 break;
2468                         }
2469                 }
2470
2471                 if (!voltage_found) {
2472                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2473                                 if (voltage_table->value <=
2474                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2475                                         voltage_found = true;
2476                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2477                                                 idx = v_index;
2478                                         else
2479                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2480                                         *std_voltage_lo_sidd =
2481                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2482                                         *std_voltage_hi_sidd =
2483                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2484                                         break;
2485                                 }
2486                         }
2487                 }
2488         }
2489
2490         return 0;
2491 }
2492
2493 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2494                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2495                                                   u32 sclk,
2496                                                   u32 *phase_shedding)
2497 {
2498         unsigned int i;
2499
2500         *phase_shedding = 1;
2501
2502         for (i = 0; i < limits->count; i++) {
2503                 if (sclk < limits->entries[i].sclk) {
2504                         *phase_shedding = i;
2505                         break;
2506                 }
2507         }
2508 }
2509
2510 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2511                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2512                                                   u32 mclk,
2513                                                   u32 *phase_shedding)
2514 {
2515         unsigned int i;
2516
2517         *phase_shedding = 1;
2518
2519         for (i = 0; i < limits->count; i++) {
2520                 if (mclk < limits->entries[i].mclk) {
2521                         *phase_shedding = i;
2522                         break;
2523                 }
2524         }
2525 }
2526
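/*
 * The top byte of the first dword of the MC arbiter table in SMC SRAM selects
 * the active arb set; it is seeded with MC_CG_ARB_FREQ_F1, apparently so the
 * initial switch away from the boot-time F0 set has a valid target.
 */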
2527 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2528 {
2529         struct ci_power_info *pi = ci_get_pi(adev);
2530         u32 tmp;
2531         int ret;
2532
2533         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2534                                      &tmp, pi->sram_end);
2535         if (ret)
2536                 return ret;
2537
2538         tmp &= 0x00FFFFFF;
2539         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2540
2541         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2542                                        tmp, pi->sram_end);
2543 }
2544
2545 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2546                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2547                                          u32 clock, u32 *voltage)
2548 {
2549         u32 i = 0;
2550
2551         if (allowed_clock_voltage_table->count == 0)
2552                 return -EINVAL;
2553
2554         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2555                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2556                         *voltage = allowed_clock_voltage_table->entries[i].v;
2557                         return 0;
2558                 }
2559         }
2560
2561         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2562
2563         return 0;
2564 }
2565
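/*
 * Pick the deepest sleep divider (sclk >> id) that still keeps the divided
 * engine clock at or above the minimum; returns 0 (no divider) when sclk is
 * already below that floor.
 */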
2566 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2567 {
2568         u32 i;
2569         u32 tmp;
2570         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2571
2572         if (sclk < min)
2573                 return 0;
2574
2575         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2576                 tmp = sclk >> i;
2577                 if (tmp >= min || i == 0)
2578                         break;
2579         }
2580
2581         return (u8)i;
2582 }
2583
2584 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2585 {
2586         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2587 }
2588
2589 static int ci_reset_to_default(struct amdgpu_device *adev)
2590 {
2591         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2592                 0 : -EINVAL;
2593 }
2594
2595 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2596 {
2597         u32 tmp;
2598
2599         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2600
2601         if (tmp == MC_CG_ARB_FREQ_F0)
2602                 return 0;
2603
2604         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2605 }
2606
2607 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2608                                         const u32 engine_clock,
2609                                         const u32 memory_clock,
2610                                         u32 *dram_timing2)
2611 {
2612         bool patch;
2613         u32 tmp, tmp2;
2614
2615         tmp = RREG32(mmMC_SEQ_MISC0);
2616         patch = (tmp & 0x0000f00) == 0x300;
2617
2618         if (patch &&
2619             ((adev->pdev->device == 0x67B0) ||
2620              (adev->pdev->device == 0x67B1))) {
2621                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2622                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2623                         *dram_timing2 &= ~0x00ff0000;
2624                         *dram_timing2 |= tmp2 << 16;
2625                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2626                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2627                         *dram_timing2 &= ~0x00ff0000;
2628                         *dram_timing2 |= tmp2 << 16;
2629                 }
2630         }
2631 }
2632
2633 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2634                                                 u32 sclk,
2635                                                 u32 mclk,
2636                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2637 {
2638         u32 dram_timing;
2639         u32 dram_timing2;
2640         u32 burst_time;
2641
2642         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2643
2644         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2645         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2646         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2647
2648         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2649
2650         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2651         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2652         arb_regs->McArbBurstTime = (u8)burst_time;
2653
2654         return 0;
2655 }
2656
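/*
 * Fill one MC arbiter DRAM timing entry for every (sclk, mclk) pair in the
 * DPM tables, then copy the whole table into SMC SRAM in one transfer.
 */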
2657 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2658 {
2659         struct ci_power_info *pi = ci_get_pi(adev);
2660         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2661         u32 i, j;
2662         int ret = 0;
2663
2664         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2665
2666         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2667                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2668                         ret = ci_populate_memory_timing_parameters(adev,
2669                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2670                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2671                                                                    &arb_regs.entries[i][j]);
2672                         if (ret)
2673                                 break;
2674                 }
2675         }
2676
2677         if (ret == 0)
2678                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2679                                            pi->arb_table_start,
2680                                            (u8 *)&arb_regs,
2681                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2682                                            pi->sram_end);
2683
2684         return ret;
2685 }
2686
2687 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2688 {
2689         struct ci_power_info *pi = ci_get_pi(adev);
2690
2691         if (pi->need_update_smu7_dpm_table == 0)
2692                 return 0;
2693
2694         return ci_do_program_memory_timing_parameters(adev);
2695 }
2696
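/*
 * Pick the boot DPM levels: the first entry in each voltage dependency
 * table whose clock is >= the boot state's lowest performance level.
 */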
2697 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2698                                           struct amdgpu_ps *amdgpu_boot_state)
2699 {
2700         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2701         struct ci_power_info *pi = ci_get_pi(adev);
2702         u32 level = 0;
2703
2704         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2705                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2706                     boot_state->performance_levels[0].sclk) {
2707                         pi->smc_state_table.GraphicsBootLevel = level;
2708                         break;
2709                 }
2710         }
2711
2712         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2713                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2714                     boot_state->performance_levels[0].mclk) {
2715                         pi->smc_state_table.MemoryBootLevel = level;
2716                         break;
2717                 }
2718         }
2719 }
2720
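/*
 * Build a bitmask of enabled DPM levels, level 0 in bit 0.  E.g. a
 * three-level table with levels {on, on, off} yields 0b011.  The else
 * branch clears a bit the shift already left at zero, so it is
 * effectively a no-op kept for symmetry.
 */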
2721 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2722 {
2723         u32 i;
2724         u32 mask_value = 0;
2725
2726         for (i = dpm_table->count; i > 0; i--) {
2727                 mask_value = mask_value << 1;
2728                 if (dpm_table->dpm_levels[i-1].enabled)
2729                         mask_value |= 0x1;
2730                 else
2731                         mask_value &= 0xFFFFFFFE;
2732         }
2733
2734         return mask_value;
2735 }
2736
2737 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2738                                        SMU7_Discrete_DpmTable *table)
2739 {
2740         struct ci_power_info *pi = ci_get_pi(adev);
2741         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2742         u32 i;
2743
2744         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2745                 table->LinkLevel[i].PcieGenSpeed =
2746                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2747                 table->LinkLevel[i].PcieLaneCount =
2748                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2749                 table->LinkLevel[i].EnabledForActivity = 1;
2750                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2751                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2752         }
2753
2754         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2755         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2756                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2757 }
2758
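/*
 * The UVD/VCE/ACP/SAMU helpers below share one pattern: copy clock and
 * voltage from the dependency table, ask the VBIOS for a post divider,
 * then byte-swap the fields for the big-endian SMC.  Since ret starts
 * at -EINVAL and is only updated inside the loop, an empty dependency
 * table makes these return -EINVAL.  Note that the ACP variant stores
 * the raw voltage without the VOLTAGE_SCALE multiply its siblings
 * apply; whether that is intentional is not documented here.
 */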
2759 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2760                                      SMU7_Discrete_DpmTable *table)
2761 {
2762         u32 count;
2763         struct atom_clock_dividers dividers;
2764         int ret = -EINVAL;
2765
2766         table->UvdLevelCount =
2767                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2768
2769         for (count = 0; count < table->UvdLevelCount; count++) {
2770                 table->UvdLevel[count].VclkFrequency =
2771                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2772                 table->UvdLevel[count].DclkFrequency =
2773                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2774                 table->UvdLevel[count].MinVddc =
2775                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2776                 table->UvdLevel[count].MinVddcPhases = 1;
2777
2778                 ret = amdgpu_atombios_get_clock_dividers(adev,
2779                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2780                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2781                 if (ret)
2782                         return ret;
2783
2784                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2785
2786                 ret = amdgpu_atombios_get_clock_dividers(adev,
2787                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2788                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2789                 if (ret)
2790                         return ret;
2791
2792                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2793
2794                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2795                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2796                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2797         }
2798
2799         return ret;
2800 }
2801
2802 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2803                                      SMU7_Discrete_DpmTable *table)
2804 {
2805         u32 count;
2806         struct atom_clock_dividers dividers;
2807         int ret = -EINVAL;
2808
2809         table->VceLevelCount =
2810                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2811
2812         for (count = 0; count < table->VceLevelCount; count++) {
2813                 table->VceLevel[count].Frequency =
2814                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2815                 table->VceLevel[count].MinVoltage =
2816                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2817                 table->VceLevel[count].MinPhases = 1;
2818
2819                 ret = amdgpu_atombios_get_clock_dividers(adev,
2820                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2821                                                          table->VceLevel[count].Frequency, false, &dividers);
2822                 if (ret)
2823                         return ret;
2824
2825                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2826
2827                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2828                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2829         }
2830
2831         return ret;
2833 }
2834
2835 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2836                                      SMU7_Discrete_DpmTable *table)
2837 {
2838         u32 count;
2839         struct atom_clock_dividers dividers;
2840         int ret = -EINVAL;
2841
2842         table->AcpLevelCount = (u8)
2843                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2844
2845         for (count = 0; count < table->AcpLevelCount; count++) {
2846                 table->AcpLevel[count].Frequency =
2847                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2848                 table->AcpLevel[count].MinVoltage =
2849                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2850                 table->AcpLevel[count].MinPhases = 1;
2851
2852                 ret = amdgpu_atombios_get_clock_dividers(adev,
2853                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2854                                                          table->AcpLevel[count].Frequency, false, &dividers);
2855                 if (ret)
2856                         return ret;
2857
2858                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2859
2860                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2861                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2862         }
2863
2864         return ret;
2865 }
2866
2867 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2868                                       SMU7_Discrete_DpmTable *table)
2869 {
2870         u32 count;
2871         struct atom_clock_dividers dividers;
2872         int ret = -EINVAL;
2873
2874         table->SamuLevelCount =
2875                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2876
2877         for (count = 0; count < table->SamuLevelCount; count++) {
2878                 table->SamuLevel[count].Frequency =
2879                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2880                 table->SamuLevel[count].MinVoltage =
2881                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2882                 table->SamuLevel[count].MinPhases = 1;
2883
2884                 ret = amdgpu_atombios_get_clock_dividers(adev,
2885                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2886                                                          table->SamuLevel[count].Frequency, false, &dividers);
2887                 if (ret)
2888                         return ret;
2889
2890                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2891
2892                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2893                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2894         }
2895
2896         return ret;
2897 }
2898
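/*
 * Compute the MPLL register set for a target memory clock.  The divider
 * fields come from the VBIOS; on top of that, if memory spread spectrum
 * is enabled, CLKS/CLKV are derived from the nominal VCO frequency
 * (mclk * 2, or * 4 for QDR, times 2^post_div).  For example, with a
 * non-QDR post_div of 1 and memory_clock of 125000 (10 kHz units),
 * freq_nom = 125000 * 2 * 2 = 500000.  The 131/100 factor in the CLKV
 * term is presumably a fixed-point scaling of the SS percentage.
 */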
2899 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2900                                     u32 memory_clock,
2901                                     SMU7_Discrete_MemoryLevel *mclk,
2902                                     bool strobe_mode,
2903                                     bool dll_state_on)
2904 {
2905         struct ci_power_info *pi = ci_get_pi(adev);
2906         u32  dll_cntl = pi->clock_registers.dll_cntl;
2907         u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2908         u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2909         u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2910         u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2911         u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2912         u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2913         u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2914         u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2915         struct atom_mpll_param mpll_param;
2916         int ret;
2917
2918         ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2919         if (ret)
2920                 return ret;
2921
2922         mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2923         mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2924
2925         mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2926                         MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2927         mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
2928                 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2929                 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2930
2931         mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2932         mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2933
2934         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2935                 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2936                                 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2937                 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2938                                 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2939         }
2940
2941         if (pi->caps_mclk_ss_support) {
2942                 struct amdgpu_atom_ss ss;
2943                 u32 freq_nom;
2944                 u32 tmp;
2945                 u32 reference_clock = adev->clock.mpll.reference_freq;
2946
2947                 if (mpll_param.qdr == 1)
2948                         freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2949                 else
2950                         freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2951
2952                 tmp = (freq_nom / reference_clock);
2953                 tmp = tmp * tmp;
2954                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2955                                                      ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2956                         u32 clks = reference_clock * 5 / ss.rate;
2957                         u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2958
2959                         mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2960                         mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2961
2962                         mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2963                         mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2964                 }
2965         }
2966
2967         mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2968         mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2969
2970         if (dll_state_on)
2971                 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2972                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2973         else
2974                 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2975                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2976
2977         mclk->MclkFrequency = memory_clock;
2978         mclk->MpllFuncCntl = mpll_func_cntl;
2979         mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2980         mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2981         mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2982         mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2983         mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2984         mclk->DllCntl = dll_cntl;
2985         mclk->MpllSs1 = mpll_ss1;
2986         mclk->MpllSs2 = mpll_ss2;
2987
2988         return 0;
2989 }
2990
2991 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2992                                            u32 memory_clock,
2993                                            SMU7_Discrete_MemoryLevel *memory_level)
2994 {
2995         struct ci_power_info *pi = ci_get_pi(adev);
2996         int ret;
2997         bool dll_state_on;
2998
2999         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3000                 ret = ci_get_dependency_volt_by_clk(adev,
3001                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3002                                                     memory_clock, &memory_level->MinVddc);
3003                 if (ret)
3004                         return ret;
3005         }
3006
3007         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3008                 ret = ci_get_dependency_volt_by_clk(adev,
3009                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3010                                                     memory_clock, &memory_level->MinVddci);
3011                 if (ret)
3012                         return ret;
3013         }
3014
3015         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3016                 ret = ci_get_dependency_volt_by_clk(adev,
3017                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3018                                                     memory_clock, &memory_level->MinMvdd);
3019                 if (ret)
3020                         return ret;
3021         }
3022
3023         memory_level->MinVddcPhases = 1;
3024
3025         if (pi->vddc_phase_shed_control)
3026                 ci_populate_phase_value_based_on_mclk(adev,
3027                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3028                                                       memory_clock,
3029                                                       &memory_level->MinVddcPhases);
3030
3031         memory_level->EnabledForThrottle = 1;
3032         memory_level->UpH = 0;
3033         memory_level->DownH = 100;
3034         memory_level->VoltageDownH = 0;
3035         memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3036
3037         memory_level->StutterEnable = false;
3038         memory_level->StrobeEnable = false;
3039         memory_level->EdcReadEnable = false;
3040         memory_level->EdcWriteEnable = false;
3041         memory_level->RttEnable = false;
3042
3043         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3044
3045         if (pi->mclk_stutter_mode_threshold &&
3046             (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3047             (!pi->uvd_enabled) &&
3048             (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3049             (adev->pm.dpm.new_active_crtc_count <= 2))
3050                 memory_level->StutterEnable = true;
3051
3052         if (pi->mclk_strobe_mode_threshold &&
3053             (memory_clock <= pi->mclk_strobe_mode_threshold))
3054                 memory_level->StrobeEnable = true;
3055
3056         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3057                 memory_level->StrobeRatio =
3058                         ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3059                 if (pi->mclk_edc_enable_threshold &&
3060                     (memory_clock > pi->mclk_edc_enable_threshold))
3061                         memory_level->EdcReadEnable = true;
3062
3063                 if (pi->mclk_edc_wr_enable_threshold &&
3064                     (memory_clock > pi->mclk_edc_wr_enable_threshold))
3065                         memory_level->EdcWriteEnable = true;
3066
3067                 if (memory_level->StrobeEnable) {
3068                         if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3069                             ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3070                                 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3071                         else
3072                                 dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3073                 } else {
3074                         dll_state_on = pi->dll_default_on;
3075                 }
3076         } else {
3077                 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3078                 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3079         }
3080
3081         ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3082         if (ret)
3083                 return ret;
3084
3085         memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3086         memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3087         memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3088         memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3089
3090         memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3091         memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3092         memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3093         memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3094         memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3095         memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3096         memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3097         memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3098         memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3099         memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3100         memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3101
3102         return 0;
3103 }
3104
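/*
 * Fill in the ACPI (lowest-power) level: run the sclk from the SPLL
 * reference, power down and reset the SPLL, point the SCLK mux at
 * select value 4, and reset/power down the memory DLLs.  The memory
 * side reuses the saved MPLL register snapshot from pi->clock_registers.
 */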
3105 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3106                                       SMU7_Discrete_DpmTable *table)
3107 {
3108         struct ci_power_info *pi = ci_get_pi(adev);
3109         struct atom_clock_dividers dividers;
3110         SMU7_Discrete_VoltageLevel voltage_level;
3111         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3112         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3113         u32 dll_cntl = pi->clock_registers.dll_cntl;
3114         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3115         int ret;
3116
3117         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3118
3119         if (pi->acpi_vddc)
3120                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3121         else
3122                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3123
3124         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3125
3126         table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3127
3128         ret = amdgpu_atombios_get_clock_dividers(adev,
3129                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3130                                                  table->ACPILevel.SclkFrequency, false, &dividers);
3131         if (ret)
3132                 return ret;
3133
3134         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3135         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3136         table->ACPILevel.DeepSleepDivId = 0;
3137
3138         spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3139         spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3140
3141         spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3142         spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3143
3144         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3145         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3146         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3147         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3148         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3149         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3150         table->ACPILevel.CcPwrDynRm = 0;
3151         table->ACPILevel.CcPwrDynRm1 = 0;
3152
3153         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3154         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3155         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3156         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3157         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3158         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3159         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3160         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3161         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3162         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3163         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3164
3165         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3166         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3167
3168         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3169                 if (pi->acpi_vddci)
3170                         table->MemoryACPILevel.MinVddci =
3171                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3172                 else
3173                         table->MemoryACPILevel.MinVddci =
3174                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3175         }
3176
3177         if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3178                 table->MemoryACPILevel.MinMvdd = 0;
3179         else
3180                 table->MemoryACPILevel.MinMvdd =
3181                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3182
3183         mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3184                 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3185         mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3186                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3187
3188         dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3189
3190         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3191         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3192         table->MemoryACPILevel.MpllAdFuncCntl =
3193                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3194         table->MemoryACPILevel.MpllDqFuncCntl =
3195                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3196         table->MemoryACPILevel.MpllFuncCntl =
3197                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3198         table->MemoryACPILevel.MpllFuncCntl_1 =
3199                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3200         table->MemoryACPILevel.MpllFuncCntl_2 =
3201                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3202         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3203         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3204
3205         table->MemoryACPILevel.EnabledForThrottle = 0;
3206         table->MemoryACPILevel.EnabledForActivity = 0;
3207         table->MemoryACPILevel.UpH = 0;
3208         table->MemoryACPILevel.DownH = 100;
3209         table->MemoryACPILevel.VoltageDownH = 0;
3210         table->MemoryACPILevel.ActivityLevel =
3211                 cpu_to_be16((u16)pi->mclk_activity_target);
3212
3213         table->MemoryACPILevel.StutterEnable = false;
3214         table->MemoryACPILevel.StrobeEnable = false;
3215         table->MemoryACPILevel.EdcReadEnable = false;
3216         table->MemoryACPILevel.EdcWriteEnable = false;
3217         table->MemoryACPILevel.RttEnable = false;
3218
3219         return 0;
3220 }
3221
3223 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3224 {
3225         struct ci_power_info *pi = ci_get_pi(adev);
3226         struct ci_ulv_parm *ulv = &pi->ulv;
3227
3228         if (ulv->supported) {
3229                 if (enable)
3230                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3231                                 0 : -EINVAL;
3232                 else
3233                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3234                                 0 : -EINVAL;
3235         }
3236
3237         return 0;
3238 }
3239
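/*
 * ULV (ultra-low voltage) level.  The target ULV voltage appears to be
 * stashed in adev->pm.dpm.backbias_response_time by the power-table
 * parsing code; a value of 0 disables ULV support.  The offset below
 * the lowest sclk-dependency voltage is expressed either directly
 * (GPIO/SMIO control) or as a VID code for SVI2.
 */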
3240 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3241                                  SMU7_Discrete_Ulv *state)
3242 {
3243         struct ci_power_info *pi = ci_get_pi(adev);
3244         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3245
3246         state->CcPwrDynRm = 0;
3247         state->CcPwrDynRm1 = 0;
3248
3249         if (ulv_voltage == 0) {
3250                 pi->ulv.supported = false;
3251                 return 0;
3252         }
3253
3254         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3255                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3256                         state->VddcOffset = 0;
3257                 else
3258                         state->VddcOffset =
3259                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3260         } else {
3261                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3262                         state->VddcOffsetVid = 0;
3263                 else
3264                         state->VddcOffsetVid = (u8)
3265                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3266                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3267         }
3268         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3269
3270         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3271         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3272         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3273
3274         return 0;
3275 }
3276
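/*
 * Compute the SPLL register set for a target engine clock.  The
 * feedback divider from the VBIOS is written with dithering enabled;
 * when engine spread spectrum is enabled, CLK_S is derived from the
 * reference clock and SS rate, and CLK_V scales the SS percentage
 * against the feedback divider.
 */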
3277 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3278                                     u32 engine_clock,
3279                                     SMU7_Discrete_GraphicsLevel *sclk)
3280 {
3281         struct ci_power_info *pi = ci_get_pi(adev);
3282         struct atom_clock_dividers dividers;
3283         u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3284         u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3285         u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3286         u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3287         u32 reference_clock = adev->clock.spll.reference_freq;
3288         u32 reference_divider;
3289         u32 fbdiv;
3290         int ret;
3291
3292         ret = amdgpu_atombios_get_clock_dividers(adev,
3293                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3294                                                  engine_clock, false, &dividers);
3295         if (ret)
3296                 return ret;
3297
3298         reference_divider = 1 + dividers.ref_div;
3299         fbdiv = dividers.fb_div & 0x3FFFFFF;
3300
3301         spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3302         spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3303         spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3304
3305         if (pi->caps_sclk_ss_support) {
3306                 struct amdgpu_atom_ss ss;
3307                 u32 vco_freq = engine_clock * dividers.post_div;
3308
3309                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3310                                                      ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3311                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3312                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3313
3314                         cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3315                         cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3316                         cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3317
3318                         cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3319                         cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3320                 }
3321         }
3322
3323         sclk->SclkFrequency = engine_clock;
3324         sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3325         sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3326         sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3327         sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3328         sclk->SclkDid = (u8)dividers.post_divider;
3329
3330         return 0;
3331 }
3332
3333 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3334                                             u32 engine_clock,
3335                                             u16 sclk_activity_level_t,
3336                                             SMU7_Discrete_GraphicsLevel *graphic_level)
3337 {
3338         struct ci_power_info *pi = ci_get_pi(adev);
3339         int ret;
3340
3341         ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3342         if (ret)
3343                 return ret;
3344
3345         ret = ci_get_dependency_volt_by_clk(adev,
3346                                             &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3347                                             engine_clock, &graphic_level->MinVddc);
3348         if (ret)
3349                 return ret;
3350
3351         graphic_level->SclkFrequency = engine_clock;
3352
3353         graphic_level->Flags = 0;
3354         graphic_level->MinVddcPhases = 1;
3355
3356         if (pi->vddc_phase_shed_control)
3357                 ci_populate_phase_value_based_on_sclk(adev,
3358                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3359                                                       engine_clock,
3360                                                       &graphic_level->MinVddcPhases);
3361
3362         graphic_level->ActivityLevel = sclk_activity_level_t;
3363
3364         graphic_level->CcPwrDynRm = 0;
3365         graphic_level->CcPwrDynRm1 = 0;
3366         graphic_level->EnabledForThrottle = 1;
3367         graphic_level->UpH = 0;
3368         graphic_level->DownH = 0;
3369         graphic_level->VoltageDownH = 0;
3370         graphic_level->PowerThrottle = 0;
3371
3372         if (pi->caps_sclk_ds)
3373                 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3374                                                                                    CISLAND_MINIMUM_ENGINE_CLOCK);
3375
3376         graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3377
3378         graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3379         graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3380         graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3381         graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3382         graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3383         graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3384         graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3385         graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3386         graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3387         graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3388         graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3389
3390         return 0;
3391 }
3392
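/*
 * Populate every graphics DPM level and upload the whole array to SMC
 * SRAM.  Deep-sleep dividers are kept only on the two lowest levels,
 * the top level gets the HIGH display watermark, and only level 0 is
 * flagged EnabledForActivity.
 */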
3393 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3394 {
3395         struct ci_power_info *pi = ci_get_pi(adev);
3396         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3397         u32 level_array_address = pi->dpm_table_start +
3398                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3399         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3400                 SMU7_MAX_LEVELS_GRAPHICS;
3401         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3402         u32 i;
        int ret;
3403
3404         memset(levels, 0, level_array_size);
3405
3406         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3407                 ret = ci_populate_single_graphic_level(adev,
3408                                                        dpm_table->sclk_table.dpm_levels[i].value,
3409                                                        (u16)pi->activity_target[i],
3410                                                        &pi->smc_state_table.GraphicsLevel[i]);
3411                 if (ret)
3412                         return ret;
3413                 if (i > 1)
3414                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3415                 if (i == (dpm_table->sclk_table.count - 1))
3416                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3417                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3418         }
3419         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3420
3421         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3422         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3423                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3424
3425         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3426                                    (u8 *)levels, level_array_size,
3427                                    pi->sram_end);
3428         if (ret)
3429                 return ret;
3430
3431         return 0;
3432 }
3433
3434 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3435                                  SMU7_Discrete_Ulv *ulv_level)
3436 {
3437         return ci_populate_ulv_level(adev, ulv_level);
3438 }
3439
3440 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3441 {
3442         struct ci_power_info *pi = ci_get_pi(adev);
3443         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3444         u32 level_array_address = pi->dpm_table_start +
3445                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3446         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3447                 SMU7_MAX_LEVELS_MEMORY;
3448         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3449         u32 i;
        int ret;
3450
3451         memset(levels, 0, level_array_size);
3452
3453         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3454                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3455                         return -EINVAL;
3456                 ret = ci_populate_single_memory_level(adev,
3457                                                       dpm_table->mclk_table.dpm_levels[i].value,
3458                                                       &pi->smc_state_table.MemoryLevel[i]);
3459                 if (ret)
3460                         return ret;
3461         }
3462
3463         pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3464
3465         if ((dpm_table->mclk_table.count >= 2) &&
3466             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3467                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3468                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3469                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3470                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3471         }
3472
3473         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3474
3475         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3476         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3477                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3478
3479         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3480                 PPSMC_DISPLAY_WATERMARK_HIGH;
3481
3482         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3483                                    (u8 *)levels, level_array_size,
3484                                    pi->sram_end);
3485         if (ret)
3486                 return ret;
3487
3488         return 0;
3489 }
3490
3491 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3492                                       struct ci_single_dpm_table* dpm_table,
3493                                       u32 count)
3494 {
3495         u32 i;
3496
3497         dpm_table->count = count;
3498         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3499                 dpm_table->dpm_levels[i].enabled = false;
3500 }
3501
3502 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3503                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3504 {
3505         dpm_table->dpm_levels[index].value = pcie_gen;
3506         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3507         dpm_table->dpm_levels[index].enabled = true;
3508 }
3509
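/*
 * Build the fixed six-entry PCIe table, mixing the power-saving and
 * performance gen/lane ranges from min to max.  If only one of the two
 * range sets is valid it is mirrored into the other.  Bonaire uses the
 * maximum power-saving lane count for entry 0, presumably as a
 * board-specific quirk.
 */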
3510 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3511 {
3512         struct ci_power_info *pi = ci_get_pi(adev);
3513
3514         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3515                 return -EINVAL;
3516
3517         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3518                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3519                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3520         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3521                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3522                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3523         }
3524
3525         ci_reset_single_dpm_table(adev,
3526                                   &pi->dpm_table.pcie_speed_table,
3527                                   SMU7_MAX_LEVELS_LINK);
3528
3529         if (adev->asic_type == CHIP_BONAIRE)
3530                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3531                                           pi->pcie_gen_powersaving.min,
3532                                           pi->pcie_lane_powersaving.max);
3533         else
3534                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3535                                           pi->pcie_gen_powersaving.min,
3536                                           pi->pcie_lane_powersaving.min);
3537         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3538                                   pi->pcie_gen_performance.min,
3539                                   pi->pcie_lane_performance.min);
3540         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3541                                   pi->pcie_gen_powersaving.min,
3542                                   pi->pcie_lane_powersaving.max);
3543         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3544                                   pi->pcie_gen_performance.min,
3545                                   pi->pcie_lane_performance.max);
3546         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3547                                   pi->pcie_gen_powersaving.max,
3548                                   pi->pcie_lane_powersaving.max);
3549         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3550                                   pi->pcie_gen_performance.max,
3551                                   pi->pcie_lane_performance.max);
3552
3553         pi->dpm_table.pcie_speed_table.count = 6;
3554
3555         return 0;
3556 }
3557
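/*
 * Derive the default DPM tables from the VBIOS dependency tables,
 * skipping consecutive duplicate clock entries, then build the PCIe
 * table and keep a "golden" copy for later restore to defaults.
 */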
3558 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3559 {
3560         struct ci_power_info *pi = ci_get_pi(adev);
3561         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3562                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3563         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3564                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3565         struct amdgpu_cac_leakage_table *std_voltage_table =
3566                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3567         u32 i;
3568
3569         if (allowed_sclk_vddc_table == NULL)
3570                 return -EINVAL;
3571         if (allowed_sclk_vddc_table->count < 1)
3572                 return -EINVAL;
3573         if (allowed_mclk_table == NULL)
3574                 return -EINVAL;
3575         if (allowed_mclk_table->count < 1)
3576                 return -EINVAL;
3577
3578         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3579
3580         ci_reset_single_dpm_table(adev,
3581                                   &pi->dpm_table.sclk_table,
3582                                   SMU7_MAX_LEVELS_GRAPHICS);
3583         ci_reset_single_dpm_table(adev,
3584                                   &pi->dpm_table.mclk_table,
3585                                   SMU7_MAX_LEVELS_MEMORY);
3586         ci_reset_single_dpm_table(adev,
3587                                   &pi->dpm_table.vddc_table,
3588                                   SMU7_MAX_LEVELS_VDDC);
3589         ci_reset_single_dpm_table(adev,
3590                                   &pi->dpm_table.vddci_table,
3591                                   SMU7_MAX_LEVELS_VDDCI);
3592         ci_reset_single_dpm_table(adev,
3593                                   &pi->dpm_table.mvdd_table,
3594                                   SMU7_MAX_LEVELS_MVDD);
3595
3596         pi->dpm_table.sclk_table.count = 0;
3597         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3598                 if ((i == 0) ||
3599                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3600                      allowed_sclk_vddc_table->entries[i].clk)) {
3601                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3602                                 allowed_sclk_vddc_table->entries[i].clk;
3603                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3604                                 (i == 0);
3605                         pi->dpm_table.sclk_table.count++;
3606                 }
3607         }
3608
3609         pi->dpm_table.mclk_table.count = 0;
3610         for (i = 0; i < allowed_mclk_table->count; i++) {
3611                 if ((i == 0) ||
3612                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3613                      allowed_mclk_table->entries[i].clk)) {
3614                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3615                                 allowed_mclk_table->entries[i].clk;
3616                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3617                                 (i == 0);
3618                         pi->dpm_table.mclk_table.count++;
3619                 }
3620         }
3621
3622         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3623                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3624                         allowed_sclk_vddc_table->entries[i].v;
3625                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3626                         std_voltage_table->entries[i].leakage;
3627                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3628         }
3629         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3630
3631         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3632         if (allowed_mclk_table) {
3633                 for (i = 0; i < allowed_mclk_table->count; i++) {
3634                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3635                                 allowed_mclk_table->entries[i].v;
3636                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3637                 }
3638                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3639         }
3640
3641         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3642         if (allowed_mclk_table) {
3643                 for (i = 0; i < allowed_mclk_table->count; i++) {
3644                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3645                                 allowed_mclk_table->entries[i].v;
3646                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3647                 }
3648                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3649         }
3650
3651         ci_setup_default_pcie_tables(adev);
3652
3653         /* save a copy of the default DPM table */
3654         memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3655                         sizeof(struct ci_dpm_table));
3656
3657         return 0;
3658 }
3659
3660 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3661                               u32 value, u32 *boot_level)
3662 {
3663         u32 i;
3664         int ret = -EINVAL;
3665
3666         for (i = 0; i < table->count; i++) {
3667                 if (value == table->dpm_levels[i].value) {
3668                         *boot_level = i;
3669                         ret = 0;
3670                 }
3671         }
3672
3673         return ret;
3674 }
3675
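/*
 * Top-level SMC table initialization: build the DPM tables, populate
 * every level type, fill in the global flags/intervals, byte-swap for
 * the SMC, and upload everything except the three trailing PID
 * controller blocks.  The ci_find_boot_level() return values are not
 * checked; on a miss the boot levels simply stay at 0.
 */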
3676 static int ci_init_smc_table(struct amdgpu_device *adev)
3677 {
3678         struct ci_power_info *pi = ci_get_pi(adev);
3679         struct ci_ulv_parm *ulv = &pi->ulv;
3680         struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3681         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3682         int ret;
3683
3684         ret = ci_setup_default_dpm_tables(adev);
3685         if (ret)
3686                 return ret;
3687
3688         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3689                 ci_populate_smc_voltage_tables(adev, table);
3690
3691         ci_init_fps_limits(adev);
3692
3693         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3694                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3695
3696         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3697                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3698
3699         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3700                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3701
3702         if (ulv->supported) {
3703                 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3704                 if (ret)
3705                         return ret;
3706                 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3707         }
3708
3709         ret = ci_populate_all_graphic_levels(adev);
3710         if (ret)
3711                 return ret;
3712
3713         ret = ci_populate_all_memory_levels(adev);
3714         if (ret)
3715                 return ret;
3716
3717         ci_populate_smc_link_level(adev, table);
3718
3719         ret = ci_populate_smc_acpi_level(adev, table);
3720         if (ret)
3721                 return ret;
3722
3723         ret = ci_populate_smc_vce_level(adev, table);
3724         if (ret)
3725                 return ret;
3726
3727         ret = ci_populate_smc_acp_level(adev, table);
3728         if (ret)
3729                 return ret;
3730
3731         ret = ci_populate_smc_samu_level(adev, table);
3732         if (ret)
3733                 return ret;
3734
3735         ret = ci_do_program_memory_timing_parameters(adev);
3736         if (ret)
3737                 return ret;
3738
3739         ret = ci_populate_smc_uvd_level(adev, table);
3740         if (ret)
3741                 return ret;
3742
3743         table->UvdBootLevel = 0;
3744         table->VceBootLevel = 0;
3745         table->AcpBootLevel = 0;
3746         table->SamuBootLevel = 0;
3747         table->GraphicsBootLevel = 0;
3748         table->MemoryBootLevel = 0;
3749
3750         ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3751                                  pi->vbios_boot_state.sclk_bootup_value,
3752                                  (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3753
3754         ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3755                                  pi->vbios_boot_state.mclk_bootup_value,
3756                                  (u32 *)&pi->smc_state_table.MemoryBootLevel);
3757
3758         table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3759         table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3760         table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3761
3762         ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3763
3764         ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3765         if (ret)
3766                 return ret;
3767
3768         table->UVDInterval = 1;
3769         table->VCEInterval = 1;
3770         table->ACPInterval = 1;
3771         table->SAMUInterval = 1;
3772         table->GraphicsVoltageChangeEnable = 1;
3773         table->GraphicsThermThrottleEnable = 1;
3774         table->GraphicsInterval = 1;
3775         table->VoltageInterval = 1;
3776         table->ThermalInterval = 1;
3777         table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3778                                              CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3779         table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3780                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3781         table->MemoryVoltageChangeEnable = 1;
3782         table->MemoryInterval = 1;
3783         table->VoltageResponseTime = 0;
3784         table->VddcVddciDelta = 4000;
3785         table->PhaseResponseTime = 0;
3786         table->MemoryThermThrottleEnable = 1;
3787         table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3788         table->PCIeGenInterval = 1;
3789         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3790                 table->SVI2Enable = 1;
3791         else
3792                 table->SVI2Enable = 0;
3793
3794         table->ThermGpio = 17;
3795         table->SclkStepSize = 0x4000;
3796
3797         table->SystemFlags = cpu_to_be32(table->SystemFlags);
3798         table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3799         table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3800         table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3801         table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3802         table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3803         table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3804         table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3805         table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3806         table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3807         table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3808         table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3809         table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3810         table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3811
3812         ret = amdgpu_ci_copy_bytes_to_smc(adev,
3813                                    pi->dpm_table_start +
3814                                    offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3815                                    (u8 *)&table->SystemFlags,
3816                                    sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3817                                    pi->sram_end);
3818         if (ret)
3819                 return ret;
3820
3821         return 0;
3822 }
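/*
 * Note: the SMC consumes these tables in big-endian byte order, which is
 * why every multi-byte field is passed through cpu_to_be16()/cpu_to_be32()
 * immediately before the upload above.  The thermal limits are programmed
 * in Q8.8 fixed point; a minimal sketch of that conversion, assuming
 * CISLANDS_Q88_FORMAT_CONVERSION_UNIT is 256 (1 << 8) - illustrative
 * helper, not part of the driver:
 */
#if 0
/* e.g. 85000 millidegrees C -> (85000 * 256) / 1000 = 21760 = 85.0 in Q8.8 */
static u16 ci_example_q88_from_millidegrees(u32 mdeg)
{
        return (u16)((mdeg * CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
}
#endif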
3823
3824 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3825                                       struct ci_single_dpm_table *dpm_table,
3826                                       u32 low_limit, u32 high_limit)
3827 {
3828         u32 i;
3829
3830         for (i = 0; i < dpm_table->count; i++) {
3831                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3832                     (dpm_table->dpm_levels[i].value > high_limit))
3833                         dpm_table->dpm_levels[i].enabled = false;
3834                 else
3835                         dpm_table->dpm_levels[i].enabled = true;
3836         }
3837 }
3838
3839 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3840                                     u32 speed_low, u32 lanes_low,
3841                                     u32 speed_high, u32 lanes_high)
3842 {
3843         struct ci_power_info *pi = ci_get_pi(adev);
3844         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3845         u32 i, j;
3846
3847         for (i = 0; i < pcie_table->count; i++) {
3848                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3849                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3850                     (pcie_table->dpm_levels[i].value > speed_high) ||
3851                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3852                         pcie_table->dpm_levels[i].enabled = false;
3853                 else
3854                         pcie_table->dpm_levels[i].enabled = true;
3855         }
3856
3857         for (i = 0; i < pcie_table->count; i++) {
3858                 if (pcie_table->dpm_levels[i].enabled) {
3859                         for (j = i + 1; j < pcie_table->count; j++) {
3860                                 if (pcie_table->dpm_levels[j].enabled) {
3861                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3862                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3863                                                 pcie_table->dpm_levels[j].enabled = false;
3864                                 }
3865                         }
3866                 }
3867         }
3868 }
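/*
 * Note: trimming never deletes table entries; it only clears the per-level
 * enabled flag, which ci_get_dpm_level_enable_mask_value() later folds into
 * the bitmask handed to the SMC.  The second loop above additionally
 * disables later duplicates.  A minimal sketch of that duplicate test,
 * assuming the ci_dpm_level layout from ci_dpm.h (illustrative only):
 */
#if 0
static bool ci_example_pcie_levels_duplicate(const struct ci_dpm_level *a,
                                             const struct ci_dpm_level *b)
{
        /* same link speed (value) and same lane count (param1) */
        return (a->value == b->value) && (a->param1 == b->param1);
}
#endif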
3869
3870 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3871                               struct amdgpu_ps *amdgpu_state)
3872 {
3873         struct ci_ps *state = ci_get_ps(amdgpu_state);
3874         struct ci_power_info *pi = ci_get_pi(adev);
3875         u32 high_limit_count;
3876
3877         if (state->performance_level_count < 1)
3878                 return -EINVAL;
3879
3880         if (state->performance_level_count == 1)
3881                 high_limit_count = 0;
3882         else
3883                 high_limit_count = 1;
3884
3885         ci_trim_single_dpm_states(adev,
3886                                   &pi->dpm_table.sclk_table,
3887                                   state->performance_levels[0].sclk,
3888                                   state->performance_levels[high_limit_count].sclk);
3889
3890         ci_trim_single_dpm_states(adev,
3891                                   &pi->dpm_table.mclk_table,
3892                                   state->performance_levels[0].mclk,
3893                                   state->performance_levels[high_limit_count].mclk);
3894
3895         ci_trim_pcie_dpm_states(adev,
3896                                 state->performance_levels[0].pcie_gen,
3897                                 state->performance_levels[0].pcie_lane,
3898                                 state->performance_levels[high_limit_count].pcie_gen,
3899                                 state->performance_levels[high_limit_count].pcie_lane);
3900
3901         return 0;
3902 }
3903
3904 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3905 {
3906         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3907                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3908         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3909                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3910         u32 requested_voltage = 0;
3911         u32 i;
3912
3913         if (disp_voltage_table == NULL)
3914                 return -EINVAL;
3915         if (!disp_voltage_table->count)
3916                 return -EINVAL;
3917
3918         for (i = 0; i < disp_voltage_table->count; i++) {
3919                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3920                         requested_voltage = disp_voltage_table->entries[i].v;
3921         }
3922
3923         for (i = 0; i < vddc_table->count; i++) {
3924                 if (requested_voltage <= vddc_table->entries[i].v) {
3925                         requested_voltage = vddc_table->entries[i].v;
3926                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3927                                                                   PPSMC_MSG_VddC_Request,
3928                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3929                                 0 : -EINVAL;
3930                 }
3931         }
3932
3933         return -EINVAL;
3934 }
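/*
 * Worked example for the lookup above, with hypothetical table values: if
 * the current display clock requires 900 mV and the sclk voltage table
 * holds { 850, 950, 1100 } mV, the second loop stops at 950 mV (the
 * smallest entry >= the request) and the SMC message parameter becomes
 * 950 * VOLTAGE_SCALE = 3800.
 */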
3935
3936 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3937 {
3938         struct ci_power_info *pi = ci_get_pi(adev);
3939         PPSMC_Result result;
3940
3941         ci_apply_disp_minimum_voltage_request(adev); /* return value ignored: treated as best-effort */
3942
3943         if (!pi->sclk_dpm_key_disabled) {
3944                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3945                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3946                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3947                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3948                         if (result != PPSMC_Result_OK)
3949                                 return -EINVAL;
3950                 }
3951         }
3952
3953         if (!pi->mclk_dpm_key_disabled) {
3954                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3955                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3956                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3957                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3958                         if (result != PPSMC_Result_OK)
3959                                 return -EINVAL;
3960                 }
3961         }
3962
3963 #if 0
3964         if (!pi->pcie_dpm_key_disabled) {
3965                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3966                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3967                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3968                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3969                         if (result != PPSMC_Result_OK)
3970                                 return -EINVAL;
3971                 }
3972         }
3973 #endif
3974
3975         return 0;
3976 }
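/*
 * Note: each *_dpm_enable_mask is a plain bitmap, bit N meaning DPM level
 * N may be used.  A minimal sketch of reading one back (illustrative only):
 */
#if 0
static void ci_example_dump_enable_mask(u32 mask)
{
        u32 level = 0;

        while (mask) {
                if (mask & 1)
                        printk(KERN_DEBUG "DPM level %u enabled\n", level);
                mask >>= 1;
                level++;
        }
}
#endif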
3977
3978 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3979                                                    struct amdgpu_ps *amdgpu_state)
3980 {
3981         struct ci_power_info *pi = ci_get_pi(adev);
3982         struct ci_ps *state = ci_get_ps(amdgpu_state);
3983         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3984         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3985         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3986         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3987         u32 i;
3988
3989         pi->need_update_smu7_dpm_table = 0;
3990
3991         for (i = 0; i < sclk_table->count; i++) {
3992                 if (sclk == sclk_table->dpm_levels[i].value)
3993                         break;
3994         }
3995
3996         if (i >= sclk_table->count) {
3997                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3998         } else {
3999                 /* XXX placeholder: the self-comparison below is always false; a real check against the display's minimum engine clock is still missing */
4000                 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4001                         pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4002         }
4003
4004         for (i = 0; i < mclk_table->count; i++) {
4005                 if (mclk == mclk_table->dpm_levels[i].value)
4006                         break;
4007         }
4008
4009         if (i >= mclk_table->count)
4010                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4011
4012         if (adev->pm.dpm.current_active_crtc_count !=
4013             adev->pm.dpm.new_active_crtc_count)
4014                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4015 }
4016
4017 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4018                                                        struct amdgpu_ps *amdgpu_state)
4019 {
4020         struct ci_power_info *pi = ci_get_pi(adev);
4021         struct ci_ps *state = ci_get_ps(amdgpu_state);
4022         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4023         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4024         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4025         int ret;
4026
4027         if (!pi->need_update_smu7_dpm_table)
4028                 return 0;
4029
4030         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4031                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4032
4033         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4034                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4035
4036         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4037                 ret = ci_populate_all_graphic_levels(adev);
4038                 if (ret)
4039                         return ret;
4040         }
4041
4042         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4043                 ret = ci_populate_all_memory_levels(adev);
4044                 if (ret)
4045                         return ret;
4046         }
4047
4048         return 0;
4049 }
4050
4051 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4052 {
4053         struct ci_power_info *pi = ci_get_pi(adev);
4054         const struct amdgpu_clock_and_voltage_limits *max_limits;
4055         int i;
4056
4057         if (adev->pm.dpm.ac_power)
4058                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4059         else
4060                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4061
4062         if (enable) {
4063                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4064
4065                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4066                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4067                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4068
4069                                 if (!pi->caps_uvd_dpm)
4070                                         break;
4071                         }
4072                 }
4073
4074                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4075                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4076                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4077
4078                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4079                         pi->uvd_enabled = true;
4080                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4081                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4082                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4083                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4084                 }
4085         } else {
4086                 if (pi->uvd_enabled) {
4087                         pi->uvd_enabled = false;
4088                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4089                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4090                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4091                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4092                 }
4093         }
4094
4095         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4096                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4097                 0 : -EINVAL;
4098 }
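/*
 * Note: ci_enable_uvd_dpm() also parks MCLK DPM level 0 while UVD is
 * active (the &= 0xFFFFFFFE above) and restores it on disable.  The
 * mask-building idiom it shares with the VCE and the disabled SAMU/ACP
 * paths below - scan the voltage dependency table from the top, admit
 * levels whose voltage fits the AC/DC limit, stop after the first hit
 * when per-level DPM is unsupported - reduces to this sketch
 * (illustrative only):
 */
#if 0
static u32 ci_example_build_clock_enable_mask(const u16 *level_vddc, int count,
                                              u16 vddc_limit, bool per_level_dpm)
{
        u32 mask = 0;
        int i;

        for (i = count - 1; i >= 0; i--) {
                if (level_vddc[i] <= vddc_limit) {
                        mask |= 1 << i;
                        if (!per_level_dpm)
                                break;
                }
        }
        return mask;
}
#endif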
4099
4100 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4101 {
4102         struct ci_power_info *pi = ci_get_pi(adev);
4103         const struct amdgpu_clock_and_voltage_limits *max_limits;
4104         int i;
4105
4106         if (adev->pm.dpm.ac_power)
4107                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4108         else
4109                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4110
4111         if (enable) {
4112                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4113                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4114                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4115                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4116
4117                                 if (!pi->caps_vce_dpm)
4118                                         break;
4119                         }
4120                 }
4121
4122                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4123                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4124                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4125         }
4126
4127         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4128                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4129                 0 : -EINVAL;
4130 }
4131
4132 #if 0
4133 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4134 {
4135         struct ci_power_info *pi = ci_get_pi(adev);
4136         const struct amdgpu_clock_and_voltage_limits *max_limits;
4137         int i;
4138
4139         if (adev->pm.dpm.ac_power)
4140                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4141         else
4142                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4143
4144         if (enable) {
4145                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4146                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4147                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4148                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4149
4150                                 if (!pi->caps_samu_dpm)
4151                                         break;
4152                         }
4153                 }
4154
4155                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4156                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4157                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4158         }
4159         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4160                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4161                 0 : -EINVAL;
4162 }
4163
4164 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4165 {
4166         struct ci_power_info *pi = ci_get_pi(adev);
4167         const struct amdgpu_clock_and_voltage_limits *max_limits;
4168         int i;
4169
4170         if (adev->pm.dpm.ac_power)
4171                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4172         else
4173                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4174
4175         if (enable) {
4176                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4177                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4178                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4179                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4180
4181                                 if (!pi->caps_acp_dpm)
4182                                         break;
4183                         }
4184                 }
4185
4186                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4187                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4188                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4189         }
4190
4191         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4192                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4193                 0 : -EINVAL;
4194 }
4195 #endif
4196
4197 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4198 {
4199         struct ci_power_info *pi = ci_get_pi(adev);
4200         u32 tmp;
4201         int ret = 0;
4202
4203         if (!gate) {
4204                 /* turn the clocks on when decoding */
4205                 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
4206                                                     AMD_CG_STATE_UNGATE);
4207                 if (ret)
4208                         return ret;
4209
4210                 if (pi->caps_uvd_dpm ||
4211                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4212                         pi->smc_state_table.UvdBootLevel = 0;
4213                 else
4214                         pi->smc_state_table.UvdBootLevel =
4215                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4216
4217                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4218                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4219                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4220                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4221                 ret = ci_enable_uvd_dpm(adev, true);
4222         } else {
4223                 ret = ci_enable_uvd_dpm(adev, false);
4224                 if (ret)
4225                         return ret;
4226
4227                 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
4228                                                     AMD_CG_STATE_GATE);
4229         }
4230
4231         return ret;
4232 }
4233
4234 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4235 {
4236         u8 i;
4237         u32 min_evclk = 30000; /* ??? */
4238         struct amdgpu_vce_clock_voltage_dependency_table *table =
4239                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4240
4241         for (i = 0; i < table->count; i++) {
4242                 if (table->entries[i].evclk >= min_evclk)
4243                         return i;
4244         }
4245
4246         return table->count - 1;
4247 }
4248
4249 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4250                              struct amdgpu_ps *amdgpu_new_state,
4251                              struct amdgpu_ps *amdgpu_current_state)
4252 {
4253         struct ci_power_info *pi = ci_get_pi(adev);
4254         int ret = 0;
4255         u32 tmp;
4256
4257         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4258                 if (amdgpu_new_state->evclk) {
4259                         /* turn the clocks on when encoding */
4260                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4261                                                             AMD_CG_STATE_UNGATE);
4262                         if (ret)
4263                                 return ret;
4264
4265                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4266                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4267                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4268                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4269                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4270
4271                         ret = ci_enable_vce_dpm(adev, true);
4272                 } else {
4273                         ret = ci_enable_vce_dpm(adev, false);
4274                         if (ret)
4275                                 return ret;
4276                         /* turn the clocks off when not encoding */
4277                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4278                                                             AMD_CG_STATE_GATE);
4279                 }
4280         }
4281         return ret;
4282 }
4283
4284 #if 0
4285 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4286 {
4287         return ci_enable_samu_dpm(adev, gate);
4288 }
4289
4290 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4291 {
4292         struct ci_power_info *pi = ci_get_pi(adev);
4293         u32 tmp;
4294
4295         if (!gate) {
4296                 pi->smc_state_table.AcpBootLevel = 0;
4297
4298                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4299                 tmp &= ~AcpBootLevel_MASK;
4300                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4301                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4302         }
4303
4304         return ci_enable_acp_dpm(adev, !gate);
4305 }
4306 #endif
4307
4308 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4309                                              struct amdgpu_ps *amdgpu_state)
4310 {
4311         struct ci_power_info *pi = ci_get_pi(adev);
4312         int ret;
4313
4314         ret = ci_trim_dpm_states(adev, amdgpu_state);
4315         if (ret)
4316                 return ret;
4317
4318         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4319                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4320         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4321                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4322         pi->last_mclk_dpm_enable_mask =
4323                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4324         if (pi->uvd_enabled) {
4325                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4326                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4327         }
4328         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4329                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4330
4331         return 0;
4332 }
4333
4334 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4335                                        u32 level_mask)
4336 {
4337         u32 level = 0;
4338
4339         while ((level_mask & (1 << level)) == 0)
4340                 level++;
4341
4342         return level;
4343 }
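/*
 * Note: this open-coded loop is find-first-set, i.e. __ffs(level_mask)
 * from <linux/bitops.h> for a non-zero mask.  Both forms misbehave on an
 * empty mask (this loop never terminates), so callers must only pass a
 * mask with at least one bit set, as the call sites below do.
 */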
4344
4345
4346 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4347                                           enum amdgpu_dpm_forced_level level)
4348 {
4349         struct ci_power_info *pi = ci_get_pi(adev);
4350         u32 tmp, levels, i;
4351         int ret;
4352
4353         if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4354                 if ((!pi->pcie_dpm_key_disabled) &&
4355                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4356                         levels = 0;
4357                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4358                         while (tmp >>= 1)
4359                                 levels++;
4360                         if (levels) {
4361                                 ret = ci_dpm_force_state_pcie(adev, levels); /* was 'level': pass the highest enabled index, matching the sclk/mclk paths */
4362                                 if (ret)
4363                                         return ret;
4364                                 for (i = 0; i < adev->usec_timeout; i++) {
4365                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4366                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4367                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4368                                         if (tmp == levels)
4369                                                 break;
4370                                         udelay(1);
4371                                 }
4372                         }
4373                 }
4374                 if ((!pi->sclk_dpm_key_disabled) &&
4375                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4376                         levels = 0;
4377                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4378                         while (tmp >>= 1)
4379                                 levels++;
4380                         if (levels) {
4381                                 ret = ci_dpm_force_state_sclk(adev, levels);
4382                                 if (ret)
4383                                         return ret;
4384                                 for (i = 0; i < adev->usec_timeout; i++) {
4385                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4386                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4387                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4388                                         if (tmp == levels)
4389                                                 break;
4390                                         udelay(1);
4391                                 }
4392                         }
4393                 }
4394                 if ((!pi->mclk_dpm_key_disabled) &&
4395                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4396                         levels = 0;
4397                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4398                         while (tmp >>= 1)
4399                                 levels++;
4400                         if (levels) {
4401                                 ret = ci_dpm_force_state_mclk(adev, levels);
4402                                 if (ret)
4403                                         return ret;
4404                                 for (i = 0; i < adev->usec_timeout; i++) {
4405                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4406                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4407                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4408                                         if (tmp == levels)
4409                                                 break;
4410                                         udelay(1);
4411                                 }
4412                         }
4413                 }
4414         } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4415                 if ((!pi->sclk_dpm_key_disabled) &&
4416                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4417                         levels = ci_get_lowest_enabled_level(adev,
4418                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4419                         ret = ci_dpm_force_state_sclk(adev, levels);
4420                         if (ret)
4421                                 return ret;
4422                         for (i = 0; i < adev->usec_timeout; i++) {
4423                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4424                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4425                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4426                                 if (tmp == levels)
4427                                         break;
4428                                 udelay(1);
4429                         }
4430                 }
4431                 if ((!pi->mclk_dpm_key_disabled) &&
4432                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4433                         levels = ci_get_lowest_enabled_level(adev,
4434                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4435                         ret = ci_dpm_force_state_mclk(adev, levels);
4436                         if (ret)
4437                                 return ret;
4438                         for (i = 0; i < adev->usec_timeout; i++) {
4439                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4440                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4441                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4442                                 if (tmp == levels)
4443                                         break;
4444                                 udelay(1);
4445                         }
4446                 }
4447                 if ((!pi->pcie_dpm_key_disabled) &&
4448                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4449                         levels = ci_get_lowest_enabled_level(adev,
4450                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4451                         ret = ci_dpm_force_state_pcie(adev, levels);
4452                         if (ret)
4453                                 return ret;
4454                         for (i = 0; i < adev->usec_timeout; i++) {
4455                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4456                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4457                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4458                                 if (tmp == levels)
4459                                         break;
4460                                 udelay(1);
4461                         }
4462                 }
4463         } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4464                 if (!pi->pcie_dpm_key_disabled) {
4465                         PPSMC_Result smc_result;
4466
4467                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4468                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4469                         if (smc_result != PPSMC_Result_OK)
4470                                 return -EINVAL;
4471                 }
4472                 ret = ci_upload_dpm_level_enable_mask(adev);
4473                 if (ret)
4474                         return ret;
4475         }
4476
4477         adev->pm.dpm.forced_level = level;
4478
4479         return 0;
4480 }
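/*
 * Note: the repeated "while (tmp >>= 1) levels++;" computes the index of
 * the highest set bit, i.e. fls(mask) - 1 for a non-zero mask, and the
 * register polls that follow wait until the SMC reports that index as the
 * current profile.  Minimal sketch of the equivalence (illustrative only):
 */
#if 0
static u32 ci_example_highest_level(u32 mask)   /* mask != 0 */
{
        return fls(mask) - 1;   /* 0x1f -> 4, same result as the shift loop */
}
#endif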
4481
4482 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4483                                        struct ci_mc_reg_table *table)
4484 {
4485         u8 i, j, k;
4486         u32 temp_reg;
4487
4488         for (i = 0, j = table->last; i < table->last; i++) {
4489                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4490                         return -EINVAL;
4491                 switch (table->mc_reg_address[i].s1) {
4492                 case mmMC_SEQ_MISC1:
4493                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4494                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4495                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4496                         for (k = 0; k < table->num_entries; k++) {
4497                                 table->mc_reg_table_entry[k].mc_data[j] =
4498                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4499                         }
4500                         j++;
4501                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4502                                 return -EINVAL;
4503
4504                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4505                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4506                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4507                         for (k = 0; k < table->num_entries; k++) {
4508                                 table->mc_reg_table_entry[k].mc_data[j] =
4509                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4510                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4511                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4512                         }
4513                         j++;
4514                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) /* was '>': off-by-one let j index one past the array */
4515                                 return -EINVAL;
4516
4517                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4518                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4519                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4520                                 for (k = 0; k < table->num_entries; k++) {
4521                                         table->mc_reg_table_entry[k].mc_data[j] =
4522                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4523                                 }
4524                                 j++;
4525                                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) /* was '>' */
4526                                         return -EINVAL;
4527                         }
4528                         break;
4529                 case mmMC_SEQ_RESERVE_M:
4530                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4531                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4532                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4533                         for (k = 0; k < table->num_entries; k++) {
4534                                 table->mc_reg_table_entry[k].mc_data[j] =
4535                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4536                         }
4537                         j++;
4538                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) /* was '>' */
4539                                 return -EINVAL;
4540                         break;
4541                 default:
4542                         break;
4543                 }
4544
4545         }
4546
4547         table->last = j;
4548
4549         return 0;
4550 }
4551
4552 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4553 {
4554         bool result = true;
4555
4556         switch (in_reg) {
4557         case mmMC_SEQ_RAS_TIMING:
4558                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4559                 break;
4560         case mmMC_SEQ_DLL_STBY:
4561                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4562                 break;
4563         case mmMC_SEQ_G5PDX_CMD0:
4564                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4565                 break;
4566         case mmMC_SEQ_G5PDX_CMD1:
4567                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4568                 break;
4569         case mmMC_SEQ_G5PDX_CTRL:
4570                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4571                 break;
4572         case mmMC_SEQ_CAS_TIMING:
4573                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4574                 break;
4575         case mmMC_SEQ_MISC_TIMING:
4576                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4577                 break;
4578         case mmMC_SEQ_MISC_TIMING2:
4579                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4580                 break;
4581         case mmMC_SEQ_PMG_DVS_CMD:
4582                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4583                 break;
4584         case mmMC_SEQ_PMG_DVS_CTL:
4585                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4586                 break;
4587         case mmMC_SEQ_RD_CTL_D0:
4588                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4589                 break;
4590         case mmMC_SEQ_RD_CTL_D1:
4591                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4592                 break;
4593         case mmMC_SEQ_WR_CTL_D0:
4594                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4595                 break;
4596         case mmMC_SEQ_WR_CTL_D1:
4597                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4598                 break;
4599         case mmMC_PMG_CMD_EMRS:
4600                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4601                 break;
4602         case mmMC_PMG_CMD_MRS:
4603                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4604                 break;
4605         case mmMC_PMG_CMD_MRS1:
4606                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4607                 break;
4608         case mmMC_SEQ_PMG_TIMING:
4609                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4610                 break;
4611         case mmMC_PMG_CMD_MRS2:
4612                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4613                 break;
4614         case mmMC_SEQ_WR_CTL_2:
4615                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4616                 break;
4617         default:
4618                 result = false;
4619                 break;
4620         }
4621
4622         return result;
4623 }
4624
4625 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4626 {
4627         u8 i, j;
4628
4629         for (i = 0; i < table->last; i++) {
4630                 for (j = 1; j < table->num_entries; j++) {
4631                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4632                             table->mc_reg_table_entry[j].mc_data[i]) {
4633                                 table->valid_flag |= 1 << i;
4634                                 break;
4635                         }
4636                 }
4637         }
4638 }
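/*
 * Note: a register column is only worth uploading per MCLK level if its
 * value actually varies between entries; constant columns are filtered out
 * via valid_flag by ci_populate_mc_reg_addresses() and
 * ci_convert_mc_registers() below.  Worked example with two entries and
 * three registers (hypothetical values):
 *
 *   column:            0      1      2
 *   entry[0].mc_data:  0x10   0x20   0x30
 *   entry[1].mc_data:  0x10   0x21   0x30
 *
 * Only column 1 differs, so valid_flag = 0x2 and just that column is sent
 * to the SMC.
 */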
4639
4640 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4641 {
4642         u32 i;
4643         u16 address;
4644
4645         for (i = 0; i < table->last; i++) {
4646                 table->mc_reg_address[i].s0 =
4647                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4648                         address : table->mc_reg_address[i].s1;
4649         }
4650 }
4651
4652 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4653                                       struct ci_mc_reg_table *ci_table)
4654 {
4655         u8 i, j;
4656
4657         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4658                 return -EINVAL;
4659         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4660                 return -EINVAL;
4661
4662         for (i = 0; i < table->last; i++)
4663                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4664
4665         ci_table->last = table->last;
4666
4667         for (i = 0; i < table->num_entries; i++) {
4668                 ci_table->mc_reg_table_entry[i].mclk_max =
4669                         table->mc_reg_table_entry[i].mclk_max;
4670                 for (j = 0; j < table->last; j++)
4671                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4672                                 table->mc_reg_table_entry[i].mc_data[j];
4673         }
4674         ci_table->num_entries = table->num_entries;
4675
4676         return 0;
4677 }
4678
4679 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4680                                        struct ci_mc_reg_table *table)
4681 {
4682         u8 i, k;
4683         u32 tmp;
4684         bool patch;
4685
4686         tmp = RREG32(mmMC_SEQ_MISC0);
4687         patch = ((tmp & 0x00000f00) == 0x300); /* same mask, redundant ternary dropped */
4688
4689         if (patch &&
4690             ((adev->pdev->device == 0x67B0) ||
4691              (adev->pdev->device == 0x67B1))) {
4692                 for (i = 0; i < table->last; i++) {
4693                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4694                                 return -EINVAL;
4695                         switch (table->mc_reg_address[i].s1) {
4696                         case mmMC_SEQ_MISC1:
4697                                 for (k = 0; k < table->num_entries; k++) {
4698                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4699                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4700                                                 table->mc_reg_table_entry[k].mc_data[i] =
4701                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4702                                                         0x00000007;
4703                                 }
4704                                 break;
4705                         case mmMC_SEQ_WR_CTL_D0:
4706                                 for (k = 0; k < table->num_entries; k++) {
4707                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4708                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4709                                                 table->mc_reg_table_entry[k].mc_data[i] =
4710                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4711                                                         0x0000D0DD;
4712                                 }
4713                                 break;
4714                         case mmMC_SEQ_WR_CTL_D1:
4715                                 for (k = 0; k < table->num_entries; k++) {
4716                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4717                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4718                                                 table->mc_reg_table_entry[k].mc_data[i] =
4719                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4720                                                         0x0000D0DD;
4721                                 }
4722                                 break;
4723                         case mmMC_SEQ_WR_CTL_2:
4724                                 for (k = 0; k < table->num_entries; k++) {
4725                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4726                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4727                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4728                                 }
4729                                 break;
4730                         case mmMC_SEQ_CAS_TIMING:
4731                                 for (k = 0; k < table->num_entries; k++) {
4732                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4733                                                 table->mc_reg_table_entry[k].mc_data[i] =
4734                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4735                                                         0x000C0140;
4736                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4737                                                 table->mc_reg_table_entry[k].mc_data[i] =
4738                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4739                                                         0x000C0150;
4740                                 }
4741                                 break;
4742                         case mmMC_SEQ_MISC_TIMING:
4743                                 for (k = 0; k < table->num_entries; k++) {
4744                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4745                                                 table->mc_reg_table_entry[k].mc_data[i] =
4746                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4747                                                         0x00000030;
4748                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4749                                                 table->mc_reg_table_entry[k].mc_data[i] =
4750                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4751                                                         0x00000035;
4752                                 }
4753                                 break;
4754                         default:
4755                                 break;
4756                         }
4757                 }
4758
4759                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4760                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4761                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4762                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4763                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4764         }
4765
4766         return 0;
4767 }
4768
4769 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4770 {
4771         struct ci_power_info *pi = ci_get_pi(adev);
4772         struct atom_mc_reg_table *table;
4773         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4774         u8 module_index = ci_get_memory_module_index(adev);
4775         int ret;
4776
4777         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4778         if (!table)
4779                 return -ENOMEM;
4780
4781         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4782         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4783         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4784         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4785         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4786         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4787         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4788         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4789         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4790         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4791         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4792         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4793         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4794         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4795         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4796         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4797         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4798         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4799         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4800         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4801
4802         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4803         if (ret)
4804                 goto init_mc_done;
4805
4806         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4807         if (ret)
4808                 goto init_mc_done;
4809
4810         ci_set_s0_mc_reg_index(ci_table);
4811
4812         ret = ci_register_patching_mc_seq(adev, ci_table);
4813         if (ret)
4814                 goto init_mc_done;
4815
4816         ret = ci_set_mc_special_registers(adev, ci_table);
4817         if (ret)
4818                 goto init_mc_done;
4819
4820         ci_set_valid_flag(ci_table);
4821
4822 init_mc_done:
4823         kfree(table);
4824
4825         return ret;
4826 }
4827
4828 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4829                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4830 {
4831         struct ci_power_info *pi = ci_get_pi(adev);
4832         u32 i, j;
4833
4834         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4835                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4836                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4837                                 return -EINVAL;
4838                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4839                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4840                         i++;
4841                 }
4842         }
4843
4844         mc_reg_table->last = (u8)i;
4845
4846         return 0;
4847 }
4848
4849 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4850                                     SMU7_Discrete_MCRegisterSet *data,
4851                                     u32 num_entries, u32 valid_flag)
4852 {
4853         u32 i, j;
4854
4855         for (i = 0, j = 0; j < num_entries; j++) {
4856                 if (valid_flag & (1 << j)) {
4857                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4858                         i++;
4859                 }
4860         }
4861 }
4862
4863 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4864                                                  const u32 memory_clock,
4865                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4866 {
4867         struct ci_power_info *pi = ci_get_pi(adev);
4868         u32 i = 0;
4869
4870         for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4871                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4872                         break;
4873         }
4874
4875         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4876                 --i;
4877
4878         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4879                                 mc_reg_table_data, pi->mc_reg_table.last,
4880                                 pi->mc_reg_table.valid_flag);
4881 }
4882
4883 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4884                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4885 {
4886         struct ci_power_info *pi = ci_get_pi(adev);
4887         u32 i;
4888
4889         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4890                 ci_convert_mc_reg_table_entry_to_smc(adev,
4891                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4892                                                      &mc_reg_table->data[i]);
4893 }
4894
4895 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4896 {
4897         struct ci_power_info *pi = ci_get_pi(adev);
4898         int ret;
4899
4900         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4901
4902         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4903         if (ret)
4904                 return ret;
4905         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4906
4907         return amdgpu_ci_copy_bytes_to_smc(adev,
4908                                     pi->mc_reg_table_start,
4909                                     (u8 *)&pi->smc_mc_reg_table,
4910                                     sizeof(SMU7_Discrete_MCRegisters),
4911                                     pi->sram_end);
4912 }
4913
4914 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4915 {
4916         struct ci_power_info *pi = ci_get_pi(adev);
4917
4918         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4919                 return 0;
4920
4921         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4922
4923         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4924
4925         return amdgpu_ci_copy_bytes_to_smc(adev,
4926                                     pi->mc_reg_table_start +
4927                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4928                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4929                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4930                                     pi->dpm_table.mclk_table.count,
4931                                     pi->sram_end);
4932 }
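/*
 * Note: both uploads use amdgpu_ci_copy_bytes_to_smc() with offsetof() so
 * that only the needed slice of the structure crosses into SMC RAM: the
 * whole table at init time, but just the per-level data[] array on an
 * overdrive MCLK change.  Sketch of the partial-upload arithmetic
 * (illustrative only):
 */
#if 0
static void ci_example_partial_upload_math(struct ci_power_info *pi)
{
        u32 smc_addr = pi->mc_reg_table_start +
                       offsetof(SMU7_Discrete_MCRegisters, data[0]);
        u32 length = sizeof(SMU7_Discrete_MCRegisterSet) *
                     pi->dpm_table.mclk_table.count;
        /* upload 'length' bytes at 'smc_addr', leaving the header intact */
}
#endif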
4933
4934 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4935 {
4936         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4937
4938         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4939         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4940 }
4941
4942 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4943                                                       struct amdgpu_ps *amdgpu_state)
4944 {
4945         struct ci_ps *state = ci_get_ps(amdgpu_state);
4946         int i;
4947         u16 pcie_speed, max_speed = 0;
4948
4949         for (i = 0; i < state->performance_level_count; i++) {
4950                 pcie_speed = state->performance_levels[i].pcie_gen;
4951                 if (max_speed < pcie_speed)
4952                         max_speed = pcie_speed;
4953         }
4954
4955         return max_speed;
4956 }
4957
4958 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4959 {
4960         u32 speed_cntl = 0;
4961
4962         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4963                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4964         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4965
4966         return (u16)speed_cntl;
4967 }
4968
4969 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4970 {
4971         u32 link_width = 0;
4972
4973         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4974                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4975         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4976
4977         switch (link_width) {
4978         case 1:
4979                 return 1;
4980         case 2:
4981                 return 2;
4982         case 3:
4983                 return 4;
4984         case 4:
4985                 return 8;
4986         case 0:
4987         case 6:
4988         default:
4989                 return 16;
4990         }
4991 }
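/*
 * Note: LC_LINK_WIDTH_RD is an encoded field, not a raw lane count; the
 * switch above decodes 1/2/3/4 to x1/x2/x4/x8 and treats 0, 6 and any
 * unrecognized encoding as x16.  The same decode as a lookup table
 * (illustrative only):
 */
#if 0
static int ci_example_decode_lane_width(u32 link_width)
{
        static const int decode[7] = { 16, 1, 2, 4, 8, 16, 16 };

        return (link_width < 7) ? decode[link_width] : 16;
}
#endif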
4992
4993 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4994                                                              struct amdgpu_ps *amdgpu_new_state,
4995                                                              struct amdgpu_ps *amdgpu_current_state)
4996 {
4997         struct ci_power_info *pi = ci_get_pi(adev);
4998         enum amdgpu_pcie_gen target_link_speed =
4999                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5000         enum amdgpu_pcie_gen current_link_speed;
5001
5002         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5003                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5004         else
5005                 current_link_speed = pi->force_pcie_gen;
5006
5007         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5008         pi->pspp_notify_required = false;
5009         if (target_link_speed > current_link_speed) {
5010                 switch (target_link_speed) {
5011 #ifdef CONFIG_ACPI
5012                 case AMDGPU_PCIE_GEN3:
5013                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5014                                 break;
5015                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5016                         if (current_link_speed == AMDGPU_PCIE_GEN2)
5017                                 break;
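                             /* fall through */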
5018                 case AMDGPU_PCIE_GEN2:
5019                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5020                                 break;
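                             /* fall through */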
5021 #endif
5022                 default:
5023                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5024                         break;
5025                 }
5026         } else {
5027                 if (target_link_speed < current_link_speed)
5028                         pi->pspp_notify_required = true;
5029         }
5030 }
5031
5032 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5033                                                            struct amdgpu_ps *amdgpu_new_state,
5034                                                            struct amdgpu_ps *amdgpu_current_state)
5035 {
5036         struct ci_power_info *pi = ci_get_pi(adev);
5037         enum amdgpu_pcie_gen target_link_speed =
5038                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5039         u8 request;
5040
5041         if (pi->pspp_notify_required) {
5042                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5043                         request = PCIE_PERF_REQ_PECI_GEN3;
5044                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5045                         request = PCIE_PERF_REQ_PECI_GEN2;
5046                 else
5047                         request = PCIE_PERF_REQ_PECI_GEN1;
5048
5049                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5050                     (ci_get_current_pcie_speed(adev) > 0))
5051                         return;
5052
5053 #ifdef CONFIG_ACPI
5054                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5055 #endif
5056         }
5057 }
5058
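     /**
      * ci_set_private_data_variables_based_on_pptable - cache pptable limits
      *
      * @adev: amdgpu_device pointer
      *
      * Validates the clock/voltage dependency tables, caches the min/max
      * VDDC and VDDCI values, and derives the maximum clocks and voltages
      * available on AC power from the last (highest) table entries.
      */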
5059 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5060 {
5061         struct ci_power_info *pi = ci_get_pi(adev);
5062         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5063                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5064         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5065                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5066         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5067                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5068
5069         if (allowed_sclk_vddc_table == NULL)
5070                 return -EINVAL;
5071         if (allowed_sclk_vddc_table->count < 1)
5072                 return -EINVAL;
5073         if (allowed_mclk_vddc_table == NULL)
5074                 return -EINVAL;
5075         if (allowed_mclk_vddc_table->count < 1)
5076                 return -EINVAL;
5077         if (allowed_mclk_vddci_table == NULL)
5078                 return -EINVAL;
5079         if (allowed_mclk_vddci_table->count < 1)
5080                 return -EINVAL;
5081
5082         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5083         pi->max_vddc_in_pp_table =
5084                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5085
5086         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5087         pi->max_vddci_in_pp_table =
5088                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5089
5090         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5091                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5092         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5093                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5094         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5095                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5096         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5097                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5098
5099         return 0;
5100 }
5101
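     /**
      * ci_patch_with_vddc_leakage - resolve a virtual leakage voltage ID
      *
      * @adev: amdgpu_device pointer
      * @vddc: voltage value to patch in place
      *
      * Leakage parts report a virtual voltage ID instead of a real voltage;
      * if *vddc matches one, substitute the actual voltage for this part.
      * The vddci variant below does the same for VDDCI values.
      */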
5102 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5103 {
5104         struct ci_power_info *pi = ci_get_pi(adev);
5105         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5106         u32 leakage_index;
5107
5108         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5109                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5110                         *vddc = leakage_table->actual_voltage[leakage_index];
5111                         break;
5112                 }
5113         }
5114 }
5115
5116 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5117 {
5118         struct ci_power_info *pi = ci_get_pi(adev);
5119         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5120         u32 leakage_index;
5121
5122         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5123                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5124                         *vddci = leakage_table->actual_voltage[leakage_index];
5125                         break;
5126                 }
5127         }
5128 }
5129
5130 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5131                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5132 {
5133         u32 i;
5134
5135         if (table) {
5136                 for (i = 0; i < table->count; i++)
5137                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5138         }
5139 }
5140
5141 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5142                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5143 {
5144         u32 i;
5145
5146         if (table) {
5147                 for (i = 0; i < table->count; i++)
5148                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5149         }
5150 }
5151
5152 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5153                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5154 {
5155         u32 i;
5156
5157         if (table) {
5158                 for (i = 0; i < table->count; i++)
5159                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5160         }
5161 }
5162
5163 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5164                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5165 {
5166         u32 i;
5167
5168         if (table) {
5169                 for (i = 0; i < table->count; i++)
5170                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5171         }
5172 }
5173
5174 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5175                                                                    struct amdgpu_phase_shedding_limits_table *table)
5176 {
5177         u32 i;
5178
5179         if (table) {
5180                 for (i = 0; i < table->count; i++)
5181                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5182         }
5183 }
5184
5185 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5186                                                             struct amdgpu_clock_and_voltage_limits *table)
5187 {
5188         if (table) {
5189                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5190                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5191         }
5192 }
5193
5194 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5195                                                          struct amdgpu_cac_leakage_table *table)
5196 {
5197         u32 i;
5198
5199         if (table) {
5200                 for (i = 0; i < table->count; i++)
5201                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5202         }
5203 }
5204
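     /**
      * ci_patch_dependency_tables_with_leakage - patch all voltage tables
      *
      * @adev: amdgpu_device pointer
      *
      * Applies the leakage fix-ups above to every clock/voltage dependency
      * table, the phase shedding limits, the AC and DC clock/voltage
      * limits, and the CAC leakage table.
      */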
5205 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5206 {
5208         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5209                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5210         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5211                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5212         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5213                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5214         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5215                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5216         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5217                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5218         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5219                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5220         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5221                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5222         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5223                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5224         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5225                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5226         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5227                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5228         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5229                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5230         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5231                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5233 }
5234
5235 static void ci_update_current_ps(struct amdgpu_device *adev,
5236                                  struct amdgpu_ps *rps)
5237 {
5238         struct ci_ps *new_ps = ci_get_ps(rps);
5239         struct ci_power_info *pi = ci_get_pi(adev);
5240
5241         pi->current_rps = *rps;
5242         pi->current_ps = *new_ps;
5243         pi->current_rps.ps_priv = &pi->current_ps;
5244         adev->pm.dpm.current_ps = &pi->current_rps;
5245 }
5246
5247 static void ci_update_requested_ps(struct amdgpu_device *adev,
5248                                    struct amdgpu_ps *rps)
5249 {
5250         struct ci_ps *new_ps = ci_get_ps(rps);
5251         struct ci_power_info *pi = ci_get_pi(adev);
5252
5253         pi->requested_rps = *rps;
5254         pi->requested_ps = *new_ps;
5255         pi->requested_rps.ps_priv = &pi->requested_ps;
5256         adev->pm.dpm.requested_ps = &pi->requested_rps;
5257 }
5258
5259 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5260 {
5261         struct ci_power_info *pi = ci_get_pi(adev);
5262         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5263         struct amdgpu_ps *new_ps = &requested_ps;
5264
5265         ci_update_requested_ps(adev, new_ps);
5266
5267         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5268
5269         return 0;
5270 }
5271
5272 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5273 {
5274         struct ci_power_info *pi = ci_get_pi(adev);
5275         struct amdgpu_ps *new_ps = &pi->requested_rps;
5276
5277         ci_update_current_ps(adev, new_ps);
5278 }
5279
5281 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5282 {
5283         ci_read_clock_registers(adev);
5284         ci_enable_acpi_power_management(adev);
5285         ci_init_sclk_t(adev);
5286 }
5287
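     /**
      * ci_dpm_enable - bring up dynamic power management
      *
      * @adev: amdgpu_device pointer
      *
      * Full DPM bring-up sequence: construct the voltage tables, upload the
      * SMC firmware and state tables, start the SMC, then enable ULV, sclk
      * deep sleep, DPM, DIDT, CAC, power containment and the thermal
      * controller.  Any failure aborts the sequence and is returned.
      */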
5288 static int ci_dpm_enable(struct amdgpu_device *adev)
5289 {
5290         struct ci_power_info *pi = ci_get_pi(adev);
5291         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5292         int ret;
5293
5294         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5295                 ci_enable_voltage_control(adev);
5296                 ret = ci_construct_voltage_tables(adev);
5297                 if (ret) {
5298                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5299                         return ret;
5300                 }
5301         }
5302         if (pi->caps_dynamic_ac_timing) {
5303                 ret = ci_initialize_mc_reg_table(adev);
5304                 if (ret)
5305                         pi->caps_dynamic_ac_timing = false;
5306         }
5307         if (pi->dynamic_ss)
5308                 ci_enable_spread_spectrum(adev, true);
5309         if (pi->thermal_protection)
5310                 ci_enable_thermal_protection(adev, true);
5311         ci_program_sstp(adev);
5312         ci_enable_display_gap(adev);
5313         ci_program_vc(adev);
5314         ret = ci_upload_firmware(adev);
5315         if (ret) {
5316                 DRM_ERROR("ci_upload_firmware failed\n");
5317                 return ret;
5318         }
5319         ret = ci_process_firmware_header(adev);
5320         if (ret) {
5321                 DRM_ERROR("ci_process_firmware_header failed\n");
5322                 return ret;
5323         }
5324         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5325         if (ret) {
5326                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5327                 return ret;
5328         }
5329         ret = ci_init_smc_table(adev);
5330         if (ret) {
5331                 DRM_ERROR("ci_init_smc_table failed\n");
5332                 return ret;
5333         }
5334         ret = ci_init_arb_table_index(adev);
5335         if (ret) {
5336                 DRM_ERROR("ci_init_arb_table_index failed\n");
5337                 return ret;
5338         }
5339         if (pi->caps_dynamic_ac_timing) {
5340                 ret = ci_populate_initial_mc_reg_table(adev);
5341                 if (ret) {
5342                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5343                         return ret;
5344                 }
5345         }
5346         ret = ci_populate_pm_base(adev);
5347         if (ret) {
5348                 DRM_ERROR("ci_populate_pm_base failed\n");
5349                 return ret;
5350         }
5351         ci_dpm_start_smc(adev);
5352         ci_enable_vr_hot_gpio_interrupt(adev);
5353         ret = ci_notify_smc_display_change(adev, false);
5354         if (ret) {
5355                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5356                 return ret;
5357         }
5358         ci_enable_sclk_control(adev, true);
5359         ret = ci_enable_ulv(adev, true);
5360         if (ret) {
5361                 DRM_ERROR("ci_enable_ulv failed\n");
5362                 return ret;
5363         }
5364         ret = ci_enable_ds_master_switch(adev, true);
5365         if (ret) {
5366                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5367                 return ret;
5368         }
5369         ret = ci_start_dpm(adev);
5370         if (ret) {
5371                 DRM_ERROR("ci_start_dpm failed\n");
5372                 return ret;
5373         }
5374         ret = ci_enable_didt(adev, true);
5375         if (ret) {
5376                 DRM_ERROR("ci_enable_didt failed\n");
5377                 return ret;
5378         }
5379         ret = ci_enable_smc_cac(adev, true);
5380         if (ret) {
5381                 DRM_ERROR("ci_enable_smc_cac failed\n");
5382                 return ret;
5383         }
5384         ret = ci_enable_power_containment(adev, true);
5385         if (ret) {
5386                 DRM_ERROR("ci_enable_power_containment failed\n");
5387                 return ret;
5388         }
5389
5390         ret = ci_power_control_set_level(adev);
5391         if (ret) {
5392                 DRM_ERROR("ci_power_control_set_level failed\n");
5393                 return ret;
5394         }
5395
5396         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5397
5398         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5399         if (ret) {
5400                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5401                 return ret;
5402         }
5403
5404         ci_thermal_start_thermal_controller(adev);
5405
5406         ci_update_current_ps(adev, boot_ps);
5407
5408         return 0;
5409 }
5410
5411 static void ci_dpm_disable(struct amdgpu_device *adev)
5412 {
5413         struct ci_power_info *pi = ci_get_pi(adev);
5414         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5415
5416         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5417                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5418         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5419                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5420
5421         ci_dpm_powergate_uvd(adev, true);
5422
5423         if (!amdgpu_ci_is_smc_running(adev))
5424                 return;
5425
5426         ci_thermal_stop_thermal_controller(adev);
5427
5428         if (pi->thermal_protection)
5429                 ci_enable_thermal_protection(adev, false);
5430         ci_enable_power_containment(adev, false);
5431         ci_enable_smc_cac(adev, false);
5432         ci_enable_didt(adev, false);
5433         ci_enable_spread_spectrum(adev, false);
5434         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5435         ci_stop_dpm(adev);
5436         ci_enable_ds_master_switch(adev, false);
5437         ci_enable_ulv(adev, false);
5438         ci_clear_vc(adev);
5439         ci_reset_to_default(adev);
5440         ci_dpm_stop_smc(adev);
5441         ci_force_switch_to_arb_f0(adev);
5442         ci_enable_thermal_based_sclk_dpm(adev, false);
5443
5444         ci_update_current_ps(adev, boot_ps);
5445 }
5446
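     /**
      * ci_dpm_set_power_state - program the requested power state
      *
      * @adev: amdgpu_device pointer
      *
      * Freezes sclk/mclk DPM, uploads the new DPM levels, enable mask, VCE
      * levels, MC register table and memory timings, then unfreezes DPM
      * again.  PCIe link speed requests bracket the sequence when the
      * platform supports them.
      */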
5447 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5448 {
5449         struct ci_power_info *pi = ci_get_pi(adev);
5450         struct amdgpu_ps *new_ps = &pi->requested_rps;
5451         struct amdgpu_ps *old_ps = &pi->current_rps;
5452         int ret;
5453
5454         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5455         if (pi->pcie_performance_request)
5456                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5457         ret = ci_freeze_sclk_mclk_dpm(adev);
5458         if (ret) {
5459                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5460                 return ret;
5461         }
5462         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5463         if (ret) {
5464                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5465                 return ret;
5466         }
5467         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5468         if (ret) {
5469                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5470                 return ret;
5471         }
5472
5473         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5474         if (ret) {
5475                 DRM_ERROR("ci_update_vce_dpm failed\n");
5476                 return ret;
5477         }
5478
5479         ret = ci_update_sclk_t(adev);
5480         if (ret) {
5481                 DRM_ERROR("ci_update_sclk_t failed\n");
5482                 return ret;
5483         }
5484         if (pi->caps_dynamic_ac_timing) {
5485                 ret = ci_update_and_upload_mc_reg_table(adev);
5486                 if (ret) {
5487                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5488                         return ret;
5489                 }
5490         }
5491         ret = ci_program_memory_timing_parameters(adev);
5492         if (ret) {
5493                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5494                 return ret;
5495         }
5496         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5497         if (ret) {
5498                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5499                 return ret;
5500         }
5501         ret = ci_upload_dpm_level_enable_mask(adev);
5502         if (ret) {
5503                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5504                 return ret;
5505         }
5506         if (pi->pcie_performance_request)
5507                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5508
5509         return 0;
5510 }
5511
5512 #if 0
5513 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5514 {
5515         ci_set_boot_state(adev);
5516 }
5517 #endif
5518
5519 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5520 {
5521         ci_program_display_gap(adev);
5522 }
5523
5524 union power_info {
5525         struct _ATOM_POWERPLAY_INFO info;
5526         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5527         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5528         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5529         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5530         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5531 };
5532
5533 union pplib_clock_info {
5534         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5535         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5536         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5537         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5538         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5539         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5540 };
5541
5542 union pplib_power_state {
5543         struct _ATOM_PPLIB_STATE v1;
5544         struct _ATOM_PPLIB_STATE_V2 v2;
5545 };
5546
5547 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5548                                           struct amdgpu_ps *rps,
5549                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5550                                           u8 table_rev)
5551 {
5552         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5553         rps->class = le16_to_cpu(non_clock_info->usClassification);
5554         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5555
5556         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5557                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5558                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5559         } else {
5560                 rps->vclk = 0;
5561                 rps->dclk = 0;
5562         }
5563
5564         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5565                 adev->pm.dpm.boot_ps = rps;
5566         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5567                 adev->pm.dpm.uvd_ps = rps;
5568 }
5569
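     /**
      * ci_parse_pplib_clock_info - convert one ATOM clock info entry
      *
      * @adev: amdgpu_device pointer
      * @rps: power state being filled in
      * @index: performance level index
      * @clock_info: ATOM PPLib clock info to parse
      *
      * Assembles sclk/mclk from their split low/high fields (in 10 kHz
      * units, so e.g. 80000 means 800 MHz), selects supported PCIe
      * gen/lane settings, and records the ACPI, ULV and boot levels as
      * well as the PCIe ranges for battery and performance states.
      */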
5570 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5571                                       struct amdgpu_ps *rps, int index,
5572                                       union pplib_clock_info *clock_info)
5573 {
5574         struct ci_power_info *pi = ci_get_pi(adev);
5575         struct ci_ps *ps = ci_get_ps(rps);
5576         struct ci_pl *pl = &ps->performance_levels[index];
5577
5578         ps->performance_level_count = index + 1;
5579
5580         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5581         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5582         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5583         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5584
5585         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5586                                                    pi->sys_pcie_mask,
5587                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5588                                                    clock_info->ci.ucPCIEGen);
5589         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5590                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5591                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5592
5593         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5594                 pi->acpi_pcie_gen = pl->pcie_gen;
5595         }
5596
5597         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5598                 pi->ulv.supported = true;
5599                 pi->ulv.pl = *pl;
5600                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5601         }
5602
5603         /* patch up boot state */
5604         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5605                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5606                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5607                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5608                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5609         }
5610
5611         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5612         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5613                 pi->use_pcie_powersaving_levels = true;
5614                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5615                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5616                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5617                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5618                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5619                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5620                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5621                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5622                 break;
5623         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5624                 pi->use_pcie_performance_levels = true;
5625                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5626                         pi->pcie_gen_performance.max = pl->pcie_gen;
5627                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5628                         pi->pcie_gen_performance.min = pl->pcie_gen;
5629                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5630                         pi->pcie_lane_performance.max = pl->pcie_lane;
5631                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5632                         pi->pcie_lane_performance.min = pl->pcie_lane;
5633                 break;
5634         default:
5635                 break;
5636         }
5637 }
5638
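     /**
      * ci_parse_power_table - build the power state array from the vbios
      *
      * @adev: amdgpu_device pointer
      *
      * Locates the PowerPlayInfo table, then walks the state, clock info
      * and non-clock info arrays to allocate and fill one amdgpu_ps (with
      * a ci_ps private part) per state, and finally fills in the
      * sclk/mclk of each VCE state.
      * Returns 0 on success, error on failure.
      */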
5639 static int ci_parse_power_table(struct amdgpu_device *adev)
5640 {
5641         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5642         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5643         union pplib_power_state *power_state;
5644         int i, j, k, non_clock_array_index, clock_array_index;
5645         union pplib_clock_info *clock_info;
5646         struct _StateArray *state_array;
5647         struct _ClockInfoArray *clock_info_array;
5648         struct _NonClockInfoArray *non_clock_info_array;
5649         union power_info *power_info;
5650         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5651         u16 data_offset;
5652         u8 frev, crev;
5653         u8 *power_state_offset;
5654         struct ci_ps *ps;
5655
5656         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5657                                    &frev, &crev, &data_offset))
5658                 return -EINVAL;
5659         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5660
5661         amdgpu_add_thermal_controller(adev);
5662
5663         state_array = (struct _StateArray *)
5664                 (mode_info->atom_context->bios + data_offset +
5665                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5666         clock_info_array = (struct _ClockInfoArray *)
5667                 (mode_info->atom_context->bios + data_offset +
5668                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5669         non_clock_info_array = (struct _NonClockInfoArray *)
5670                 (mode_info->atom_context->bios + data_offset +
5671                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5672
5673         adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5674                                   sizeof(struct amdgpu_ps), GFP_KERNEL);
5675         if (!adev->pm.dpm.ps)
5676                 return -ENOMEM;
5677         power_state_offset = (u8 *)state_array->states;
5678         for (i = 0; i < state_array->ucNumEntries; i++) {
5679                 u8 *idx;
5680                 power_state = (union pplib_power_state *)power_state_offset;
5681                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5682                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5683                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5684                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5685                 if (ps == NULL) {
                             for (j = 0; j < i; j++)
                                     kfree(adev->pm.dpm.ps[j].ps_priv);
5686                         kfree(adev->pm.dpm.ps);
                             adev->pm.dpm.ps = NULL;
5687                         return -ENOMEM;
5688                 }
5689                 adev->pm.dpm.ps[i].ps_priv = ps;
5690                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5691                                               non_clock_info,
5692                                               non_clock_info_array->ucEntrySize);
5693                 k = 0;
5694                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5695                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5696                         clock_array_index = idx[j];
5697                         if (clock_array_index >= clock_info_array->ucNumEntries)
5698                                 continue;
5699                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5700                                 break;
5701                         clock_info = (union pplib_clock_info *)
5702                                 ((u8 *)&clock_info_array->clockInfo[0] +
5703                                  (clock_array_index * clock_info_array->ucEntrySize));
5704                         ci_parse_pplib_clock_info(adev,
5705                                                   &adev->pm.dpm.ps[i], k,
5706                                                   clock_info);
5707                         k++;
5708                 }
5709                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5710         }
5711         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5712
5713         /* fill in the vce power states */
5714         for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5715                 u32 sclk, mclk;
5716                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5717                 clock_info = (union pplib_clock_info *)
5718                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5719                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5720                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5721                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5722                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5723                 adev->pm.dpm.vce_states[i].sclk = sclk;
5724                 adev->pm.dpm.vce_states[i].mclk = mclk;
5725         }
5726
5727         return 0;
5728 }
5729
5730 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5731                                     struct ci_vbios_boot_state *boot_state)
5732 {
5733         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5734         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5735         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5736         u8 frev, crev;
5737         u16 data_offset;
5738
5739         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5740                                    &frev, &crev, &data_offset)) {
5741                 firmware_info =
5742                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5743                                                     data_offset);
5744                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5745                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5746                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5747                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5748                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5749                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5750                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5751
5752                 return 0;
5753         }
5754         return -EINVAL;
5755 }
5756
5757 static void ci_dpm_fini(struct amdgpu_device *adev)
5758 {
5759         int i;
5760
5761         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5762                 kfree(adev->pm.dpm.ps[i].ps_priv);
5763         }
5764         kfree(adev->pm.dpm.ps);
5765         kfree(adev->pm.dpm.priv);
5766         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5767         amdgpu_free_extended_power_table(adev);
5768 }
5769
5770 /**
5771  * ci_dpm_init_microcode - load ucode images from disk
5772  *
5773  * @adev: amdgpu_device pointer
5774  *
5775  * Use the firmware interface to load the ucode images into
5776  * the driver (not loaded into hw).
5777  * Returns 0 on success, error on failure.
5778  */
5779 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5780 {
5781         const char *chip_name;
5782         char fw_name[30];
5783         int err;
5784
5785         DRM_DEBUG("\n");
5786
5787         switch (adev->asic_type) {
5788         case CHIP_BONAIRE:
5789                 if ((adev->pdev->revision == 0x80) ||
5790                     (adev->pdev->revision == 0x81) ||
5791                     (adev->pdev->device == 0x665f))
5792                         chip_name = "bonaire_k";
5793                 else
5794                         chip_name = "bonaire";
5795                 break;
5796         case CHIP_HAWAII:
5797                 if (adev->pdev->revision == 0x80)
5798                         chip_name = "hawaii_k";
5799                 else
5800                         chip_name = "hawaii";
5801                 break;
5802         case CHIP_KAVERI:
5803         case CHIP_KABINI:
5804         case CHIP_MULLINS:
5805         default: BUG();
5806         }
5807
5808         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5809         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5810         if (err)
5811                 goto out;
5812         err = amdgpu_ucode_validate(adev->pm.fw);
5813
5814 out:
5815         if (err) {
5816                 printk(KERN_ERR
5817                        "cik_smc: Failed to load firmware \"%s\"\n",
5818                        fw_name);
5819                 release_firmware(adev->pm.fw);
5820                 adev->pm.fw = NULL;
5821         }
5822         return err;
5823 }
5824
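     /**
      * ci_dpm_init - one-time DPM software setup
      *
      * @adev: amdgpu_device pointer
      *
      * Allocates the ci_power_info, reads the vbios boot values, parses
      * the platform caps and power tables, sets default activity targets
      * and thermal trip points, detects the VRHot/AcDc/PCC GPIOs and the
      * VDDC/VDDCI/MVDD control methods, and validates the DC limits.
      * Returns 0 on success, error on failure.
      */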
5825 static int ci_dpm_init(struct amdgpu_device *adev)
5826 {
5827         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5828         SMU7_Discrete_DpmTable *dpm_table;
5829         struct amdgpu_gpio_rec gpio;
5830         u16 data_offset, size;
5831         u8 frev, crev;
5832         struct ci_power_info *pi;
5833         int ret;
5834
5835         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5836         if (pi == NULL)
5837                 return -ENOMEM;
5838         adev->pm.dpm.priv = pi;
5839
5840         pi->sys_pcie_mask =
5841                 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5842                 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5843
5844         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5845
5846         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5847         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5848         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5849         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5850
5851         pi->pcie_lane_performance.max = 0;
5852         pi->pcie_lane_performance.min = 16;
5853         pi->pcie_lane_powersaving.max = 0;
5854         pi->pcie_lane_powersaving.min = 16;
5855
5856         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5857         if (ret) {
5858                 ci_dpm_fini(adev);
5859                 return ret;
5860         }
5861
5862         ret = amdgpu_get_platform_caps(adev);
5863         if (ret) {
5864                 ci_dpm_fini(adev);
5865                 return ret;
5866         }
5867
5868         ret = amdgpu_parse_extended_power_table(adev);
5869         if (ret) {
5870                 ci_dpm_fini(adev);
5871                 return ret;
5872         }
5873
5874         ret = ci_parse_power_table(adev);
5875         if (ret) {
5876                 ci_dpm_fini(adev);
5877                 return ret;
5878         }
5879
5880         pi->dll_default_on = false;
5881         pi->sram_end = SMC_RAM_END;
5882
5883         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5884         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5885         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5886         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5887         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5888         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5889         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5890         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5891
5892         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5893
5894         pi->sclk_dpm_key_disabled = 0;
5895         pi->mclk_dpm_key_disabled = 0;
5896         pi->pcie_dpm_key_disabled = 0;
5897         pi->thermal_sclk_dpm_enabled = 0;
5898
5899         if (amdgpu_sclk_deep_sleep_en)
5900                 pi->caps_sclk_ds = true;
5901         else
5902                 pi->caps_sclk_ds = false;
5903
5904         pi->mclk_strobe_mode_threshold = 40000;
5905         pi->mclk_stutter_mode_threshold = 40000;
5906         pi->mclk_edc_enable_threshold = 40000;
5907         pi->mclk_edc_wr_enable_threshold = 40000;
5908
5909         ci_initialize_powertune_defaults(adev);
5910
5911         pi->caps_fps = false;
5912
5913         pi->caps_sclk_throttle_low_notification = false;
5914
5915         pi->caps_uvd_dpm = true;
5916         pi->caps_vce_dpm = true;
5917
5918         ci_get_leakage_voltages(adev);
5919         ci_patch_dependency_tables_with_leakage(adev);
5920         ci_set_private_data_variables_based_on_pptable(adev);
5921
5922         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5923                 kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5924         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5925                 ci_dpm_fini(adev);
5926                 return -ENOMEM;
5927         }
5928         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5929         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5930         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5931         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5932         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5933         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5934         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5935         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5936         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5937
5938         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5939         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5940         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5941
5942         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5943         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5944         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5945         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5946
5947         if (adev->asic_type == CHIP_HAWAII) {
5948                 pi->thermal_temp_setting.temperature_low = 94500;
5949                 pi->thermal_temp_setting.temperature_high = 95000;
5950                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5951         } else {
5952                 pi->thermal_temp_setting.temperature_low = 99500;
5953                 pi->thermal_temp_setting.temperature_high = 100000;
5954                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5955         }
5956
5957         pi->uvd_enabled = false;
5958
5959         dpm_table = &pi->smc_state_table;
5960
5961         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5962         if (gpio.valid) {
5963                 dpm_table->VRHotGpio = gpio.shift;
5964                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5965         } else {
5966                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5967                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5968         }
5969
5970         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5971         if (gpio.valid) {
5972                 dpm_table->AcDcGpio = gpio.shift;
5973                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5974         } else {
5975                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5976                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5977         }
5978
5979         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5980         if (gpio.valid) {
5981                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5982
5983                 switch (gpio.shift) {
5984                 case 0:
5985                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5986                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5987                         break;
5988                 case 1:
5989                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5990                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5991                         break;
5992                 case 2:
5993                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5994                         break;
5995                 case 3:
5996                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
5997                         break;
5998                 case 4:
5999                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6000                         break;
6001                 default:
6002                         DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
6003                         break;
6004                 }
6005                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6006         }
6007
6008         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6009         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6010         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6011         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6012                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6013         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6014                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6015
6016         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6017                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6018                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6019                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6020                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6021                 else
6022                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6023         }
6024
6025         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6026                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6027                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6028                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6029                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6030                 else
6031                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6032         }
6033
6034         pi->vddc_phase_shed_control = true;
6035
6036 #if defined(CONFIG_ACPI)
6037         pi->pcie_performance_request =
6038                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6039 #else
6040         pi->pcie_performance_request = false;
6041 #endif
6042
6043         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6044                                    &frev, &crev, &data_offset)) {
6045                 pi->caps_sclk_ss_support = true;
6046                 pi->caps_mclk_ss_support = true;
6047                 pi->dynamic_ss = true;
6048         } else {
6049                 pi->caps_sclk_ss_support = false;
6050                 pi->caps_mclk_ss_support = false;
6051                 pi->dynamic_ss = true;
6052         }
6053
6054         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6055                 pi->thermal_protection = true;
6056         else
6057                 pi->thermal_protection = false;
6058
6059         pi->caps_dynamic_ac_timing = true;
6060
6061         pi->uvd_power_gated = true;
6062
6063         /* make sure dc limits are valid */
6064         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6065             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6066                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6067                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6068
6069         pi->fan_ctrl_is_in_default_mode = true;
6070
6071         return 0;
6072 }
6073
6074 static void
6075 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6076                                                struct seq_file *m)
6077 {
6078         struct ci_power_info *pi = ci_get_pi(adev);
6079         struct amdgpu_ps *rps = &pi->current_rps;
6080         u32 sclk = ci_get_average_sclk_freq(adev);
6081         u32 mclk = ci_get_average_mclk_freq(adev);
6082         u32 activity_percent = 50;
6083         int ret;
6084
6085         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6086                                         &activity_percent);
6087
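             /* the SMC reports activity scaled by 256; round to the nearest percent and clamp to 100 */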
6088         if (ret == 0) {
6089                 activity_percent += 0x80;
6090                 activity_percent >>= 8;
6091                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6092         }
6093
6094         seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6095         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6096         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6097                    sclk, mclk);
6098         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6099 }
6100
6101 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6102                                      struct amdgpu_ps *rps)
6103 {
6104         struct ci_ps *ps = ci_get_ps(rps);
6105         struct ci_pl *pl;
6106         int i;
6107
6108         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6109         amdgpu_dpm_print_cap_info(rps->caps);
6110         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6111         for (i = 0; i < ps->performance_level_count; i++) {
6112                 pl = &ps->performance_levels[i];
6113                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6114                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6115         }
6116         amdgpu_dpm_print_ps_status(adev, rps);
6117 }
6118
6119 static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6120                                                 const struct ci_pl *ci_cpl2)
6121 {
6122         return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6123                   (ci_cpl1->sclk == ci_cpl2->sclk) &&
6124                   (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6125                   (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6126 }
6127
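     /**
      * ci_check_state_equal - compare two power states
      *
      * @adev: amdgpu_device pointer
      * @cps: current power state
      * @rps: requested power state
      * @equal: set to true when the states match
      *
      * States match when every performance level agrees in sclk, mclk and
      * PCIe settings; UVD and VCE clocks are then used as a tiebreaker.
      */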
6128 static int ci_check_state_equal(struct amdgpu_device *adev,
6129                                 struct amdgpu_ps *cps,
6130                                 struct amdgpu_ps *rps,
6131                                 bool *equal)
6132 {
6133         struct ci_ps *ci_cps;
6134         struct ci_ps *ci_rps;
6135         int i;
6136
6137         if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6138                 return -EINVAL;
6139
6140         ci_cps = ci_get_ps(cps);
6141         ci_rps = ci_get_ps(rps);
6142
6143         if (ci_cps == NULL) {
6144                 *equal = false;
6145                 return 0;
6146         }
6147
6148         if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6150                 *equal = false;
6151                 return 0;
6152         }
6153
6154         for (i = 0; i < ci_cps->performance_level_count; i++) {
6155                 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6156                                         &(ci_rps->performance_levels[i]))) {
6157                         *equal = false;
6158                         return 0;
6159                 }
6160         }
6161
6162         /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
6163         *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6164         *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6165
6166         return 0;
6167 }
6168
6169 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6170 {
6171         struct ci_power_info *pi = ci_get_pi(adev);
6172         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6173
6174         if (low)
6175                 return requested_state->performance_levels[0].sclk;
6176         else
6177                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6178 }
6179
6180 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6181 {
6182         struct ci_power_info *pi = ci_get_pi(adev);
6183         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6184
6185         if (low)
6186                 return requested_state->performance_levels[0].mclk;
6187         else
6188                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6189 }
6190
6191 /* get temperature in millidegrees */
6192 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6193 {
6194         u32 temp;
6195         int actual_temp = 0;
6196
6197         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6198                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6199
6200         if (temp & 0x200)
6201                 actual_temp = 255;
6202         else
6203                 actual_temp = temp & 0x1ff;
6204
6205         actual_temp = actual_temp * 1000;
6206
6207         return actual_temp;
6208 }
6209
6210 static int ci_set_temperature_range(struct amdgpu_device *adev)
6211 {
6212         int ret;
6213
6214         ret = ci_thermal_enable_alert(adev, false);
6215         if (ret)
6216                 return ret;
6217         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6218                                                CISLANDS_TEMP_RANGE_MAX);
6219         if (ret)
6220                 return ret;
6221         ret = ci_thermal_enable_alert(adev, true);
6222         if (ret)
6223                 return ret;
6224         return 0;
6225 }
6226
6227 static int ci_dpm_early_init(void *handle)
6228 {
6229         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6230
6231         ci_dpm_set_dpm_funcs(adev);
6232         ci_dpm_set_irq_funcs(adev);
6233
6234         return 0;
6235 }
6236
6237 static int ci_dpm_late_init(void *handle)
6238 {
6239         int ret;
6240         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6241
6242         if (!amdgpu_dpm)
6243                 return 0;
6244
6245         /* init the sysfs and debugfs files late */
6246         ret = amdgpu_pm_sysfs_init(adev);
6247         if (ret)
6248                 return ret;
6249
6250         ret = ci_set_temperature_range(adev);
6251         if (ret)
6252                 return ret;
6253
6254         return 0;
6255 }
6256
static int ci_dpm_sw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        /* default to balanced state */
        adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
        adev->pm.default_sclk = adev->clock.default_sclk;
        adev->pm.default_mclk = adev->clock.default_mclk;
        adev->pm.current_sclk = adev->clock.default_sclk;
        adev->pm.current_mclk = adev->clock.default_mclk;
        adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

        if (amdgpu_dpm == 0)
                return 0;

        ret = ci_dpm_init_microcode(adev);
        if (ret)
                return ret;

        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
        mutex_lock(&adev->pm.mutex);
        ret = ci_dpm_init(adev);
        if (ret)
                goto dpm_failed;
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");

        return 0;

dpm_failed:
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_ERROR("amdgpu: dpm initialization failed\n");
        return ret;
}

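/* Tear down in reverse order: flush any pending thermal work, remove the
 * sysfs entries, free the dpm state and release the SMC firmware.
 */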
static int ci_dpm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        flush_work(&adev->pm.dpm.thermal.work);

        mutex_lock(&adev->pm.mutex);
        amdgpu_pm_sysfs_fini(adev);
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);

        release_firmware(adev->pm.fw);
        adev->pm.fw = NULL;

        return 0;
}

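/* Hardware init: program the ASIC and attempt to enable dpm;
 * adev->pm.dpm_enabled records whether that succeeded.
 */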
static int ci_dpm_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!amdgpu_dpm)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ci_dpm_setup_asic(adev);
        ret = ci_dpm_enable(adev);
        if (ret)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

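/* Disable dpm on hardware teardown if it was successfully enabled. */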
static int ci_dpm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                ci_dpm_disable(adev);
                mutex_unlock(&adev->pm.mutex);
        }

        return 0;
}

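/* On suspend, mask both thermal interrupts and drop to the boot power
 * state, remembering the user-selected state so resume can restore it.
 */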
static int ci_dpm_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                               AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
                amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                               AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
                adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
                adev->pm.dpm.last_state = adev->pm.dpm.state;
                adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
                adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
                mutex_unlock(&adev->pm.mutex);
                amdgpu_pm_compute_clocks(adev);
        }

        return 0;
}

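/* The ASIC comes back from suspend in its boot state, so reprogram it,
 * re-enable dpm and restore the pre-suspend power state.
 */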
static int ci_dpm_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                /* asic init will reset to the boot state */
                mutex_lock(&adev->pm.mutex);
                ci_dpm_setup_asic(adev);
                ret = ci_dpm_enable(adev);
                if (ret)
                        adev->pm.dpm_enabled = false;
                else
                        adev->pm.dpm_enabled = true;
                adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
                adev->pm.dpm.state = adev->pm.dpm.last_state;
                mutex_unlock(&adev->pm.mutex);
                if (adev->pm.dpm_enabled)
                        amdgpu_pm_compute_clocks(adev);
        }
        return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
        /* XXX */
        return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
        /* XXX */
        return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
        return 0;
}

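/* Mask or unmask the low-to-high and high-to-low thermal interrupts via
 * read-modify-write of the INTH/INTL mask bits in CG_THERMAL_INT.
 */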
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      unsigned type,
                                      enum amdgpu_interrupt_state state)
{
        u32 cg_thermal_int;

        switch (type) {
        case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        default:
                break;
        }
        return 0;
}

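/* Thermal interrupt handler: record the trip direction and schedule the
 * thermal worker to re-evaluate the power state.
 */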
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
                                    struct amdgpu_irq_src *source,
                                    struct amdgpu_iv_entry *entry)
{
        bool queue_thermal = false;

        if (entry == NULL)
                return -EINVAL;

        switch (entry->src_id) {
        case 230: /* thermal low to high */
                DRM_DEBUG("IH: thermal low to high\n");
                adev->pm.dpm.thermal.high_to_low = false;
                queue_thermal = true;
                break;
        case 231: /* thermal high to low */
                DRM_DEBUG("IH: thermal high to low\n");
                adev->pm.dpm.thermal.high_to_low = true;
                queue_thermal = true;
                break;
        default:
                break;
        }

        if (queue_thermal)
                schedule_work(&adev->pm.dpm.thermal.work);

        return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

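/* Format the sclk/mclk/pcie dpm levels into buf for sysfs, marking the
 * currently active level with '*'.  Clock values are stored in 10 kHz
 * units, hence the divide by 100 to print MHz.
 */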
static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
                enum pp_clock_type type, char *buf)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

        int i, now, size = 0;
        uint32_t clock, pcie_speed;

        switch (type) {
        case PP_SCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < sclk_table->count; i++) {
                        if (clock > sclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < sclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_MCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < mclk_table->count; i++) {
                        if (clock > mclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_PCIE:
                pcie_speed = ci_get_current_pcie_speed(adev);
                for (i = 0; i < pcie_table->count; i++) {
                        if (pcie_speed != pcie_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < pcie_table->count; i++)
                        size += sprintf(buf + size, "%d: %s %s\n", i,
                                        (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
                                        (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
                                        (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
                                        (i == now) ? "*" : "");
                break;
        default:
                break;
        }

        return size;
}

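/* Restrict the enabled dpm levels to the given bitmask (manual mode
 * only).  PCIe dpm takes a forced level rather than a mask, so the index
 * of the highest set bit is sent instead.
 */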
static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
                enum pp_clock_type type, uint32_t mask)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.dpm.forced_level != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;

        switch (type) {
        case PP_SCLK:
                if (!pi->sclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
                break;

        case PP_MCLK:
                if (!pi->mclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
                break;

        case PP_PCIE:
        {
                uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
                uint32_t level = 0;

                /* find the index of the highest enabled level in the mask */
                while (tmp >>= 1)
                        level++;

                if (!pi->pcie_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_PCIeDPM_ForceLevel,
                                        level);
                break;
        }
        default:
                break;
        }

        return 0;
}

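/* Report the current sclk overdrive as a percentage over the highest
 * default (golden) sclk level.
 */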
static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);
        int value;

        value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
                        100 /
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return value;
}

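/* Apply an sclk overdrive percentage (capped at 20%) on top of the
 * highest golden sclk level.
 */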
static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].sclk =
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
                        value / 100 +
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return 0;
}

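/* As above, but report the mclk overdrive percentage. */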
static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);
        int value;

        value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
                        100 /
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return value;
}

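/* Apply an mclk overdrive percentage (capped at 20%), mirroring
 * ci_dpm_set_sclk_od().
 */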
static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].mclk =
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
                        value / 100 +
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return 0;
}

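/* Standard amd_ip_funcs table wiring the handlers above into the amdgpu
 * IP-block framework.
 */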
const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
        .late_init = ci_dpm_late_init,
        .sw_init = ci_dpm_sw_init,
        .sw_fini = ci_dpm_sw_fini,
        .hw_init = ci_dpm_hw_init,
        .hw_fini = ci_dpm_hw_fini,
        .suspend = ci_dpm_suspend,
        .resume = ci_dpm_resume,
        .is_idle = ci_dpm_is_idle,
        .wait_for_idle = ci_dpm_wait_for_idle,
        .soft_reset = ci_dpm_soft_reset,
        .set_clockgating_state = ci_dpm_set_clockgating_state,
        .set_powergating_state = ci_dpm_set_powergating_state,
};

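/* dpm callbacks exported to the amdgpu pm core; installed by
 * ci_dpm_set_dpm_funcs() below.
 */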
static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
        .get_temperature = &ci_dpm_get_temp,
        .pre_set_power_state = &ci_dpm_pre_set_power_state,
        .set_power_state = &ci_dpm_set_power_state,
        .post_set_power_state = &ci_dpm_post_set_power_state,
        .display_configuration_changed = &ci_dpm_display_configuration_changed,
        .get_sclk = &ci_dpm_get_sclk,
        .get_mclk = &ci_dpm_get_mclk,
        .print_power_state = &ci_dpm_print_power_state,
        .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &ci_dpm_force_performance_level,
        .vblank_too_short = &ci_dpm_vblank_too_short,
        .powergate_uvd = &ci_dpm_powergate_uvd,
        .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
        .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
        .print_clock_levels = &ci_dpm_print_clock_levels,
        .force_clock_level = &ci_dpm_force_clock_level,
        .get_sclk_od = &ci_dpm_get_sclk_od,
        .set_sclk_od = &ci_dpm_set_sclk_od,
        .get_mclk_od = &ci_dpm_get_mclk_od,
        .set_mclk_od = &ci_dpm_set_mclk_od,
        .check_state_equal = &ci_check_state_equal,
        .get_vce_clock_state = &amdgpu_get_vce_clock_state,
};

static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
        if (adev->pm.funcs == NULL)
                adev->pm.funcs = &ci_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
        .set = ci_dpm_set_interrupt_state,
        .process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
        adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version ci_dpm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 7,
        .minor = 0,
        .rev = 0,
        .funcs = &ci_dpm_ip_funcs,
};