/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type,
                                   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
                           enum amd_dpm_forced_level level,
                           enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
                                       char *buf)
{
        struct smu_context *smu = handle;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
                                       uint64_t new_mask)
{
        struct smu_context *smu = handle;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
        if (!smu->ppt_funcs->set_gfx_off_residency)
                return -EINVAL;

        return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
        if (!smu->ppt_funcs->get_gfx_off_residency)
                return -EINVAL;

        return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
        if (!smu->ppt_funcs->get_gfx_off_entrycount)
                return -EINVAL;

        return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
        if (!smu->ppt_funcs->get_gfx_off_status)
                return -EINVAL;

        *value = smu_get_gfx_off_status(smu);

        return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
                            enum smu_clk_type clk_type,
                            uint32_t min,
                            uint32_t max)
{
        int ret = 0;

        if (smu->ppt_funcs->set_soft_freq_limited_range)
                ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
                                                                  clk_type,
                                                                  min,
                                                                  max);

        return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
                           enum smu_clk_type clk_type,
                           uint32_t *min,
                           uint32_t *max)
{
        int ret = -ENOTSUPP;

        if (!min && !max)
                return -EINVAL;

        if (smu->ppt_funcs->get_dpm_ultimate_freq)
                ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
                                                            clk_type,
                                                            min,
                                                            max);

        return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
        int ret = 0;
        struct amdgpu_device *adev = smu->adev;

        if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
                ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
                if (ret)
                        dev_err(adev->dev, "Failed to enable gfx imu!\n");
        }
        return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
        struct smu_context *smu = handle;
        uint32_t clk_freq;
        int ret = 0;

        ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
                                     low ? &clk_freq : NULL,
                                     !low ? &clk_freq : NULL);
        if (ret)
                return 0;
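        /*
         * smu_get_dpm_freq_range() reports clocks in MHz; convert to the
         * 10 kHz units used by the legacy dpm interfaces (same conversion
         * in smu_get_sclk() below).
         */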
        return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
        struct smu_context *smu = handle;
        uint32_t clk_freq;
        int ret = 0;

        ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
                                     low ? &clk_freq : NULL,
                                     !low ? &clk_freq : NULL);
        if (ret)
                return 0;
        return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
                return 0;

        return smu_set_gfx_power_up_by_imu(smu);
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
                                  bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_vcn_enable)
                return 0;

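        /*
         * vcn_gated is 1 while the block is gated. The XOR below is a
         * shorthand for "already in the requested state": disabling while
         * gated or enabling while ungated is a no-op. The JPEG helper
         * below uses the same pattern.
         */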
        if (atomic_read(&power_gate->vcn_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->vcn_gated, !enable);

        return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
                                   bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_jpeg_enable)
                return 0;

        if (atomic_read(&power_gate->jpeg_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->jpeg_gated, !enable);

        return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:     smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees the access is race free.
 * 2. Or it gets called on a user setting request of power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
                                  uint32_t block_type,
                                  bool gate)
{
        struct smu_context *smu = handle;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
                dev_WARN(smu->adev->dev,
                         "SMU uninitialized but power %s requested for %u!\n",
                         gate ? "gate" : "ungate", block_type);
                return -EOPNOTSUPP;
        }

        switch (block_type) {
        /*
         * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
         * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
         */
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCN:
                ret = smu_dpm_set_vcn_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = smu_gfx_off_control(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
                                gate ? "enable" : "disable");
                break;
        case AMD_IP_BLOCK_TYPE_SDMA:
                ret = smu_powergate_sdma(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                ret = smu_dpm_set_jpeg_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
                                gate ? "gate" : "ungate");
                break;
        default:
                dev_err(smu->adev->dev, "Unsupported block type!\n");
                return -EINVAL;
        }

        return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:        smu_context pointer
 * @clk:        enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
        if (smu->adev->in_suspend)
                return;

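        /*
         * clk_dependency records the other clocks implied by forcing @clk:
         * any clock whose bit is set here is skipped when the saved user
         * settings are replayed in smu_restore_dpm_user_profile().
         */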
        if (clk == SMU_MCLK) {
                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
        } else if (clk == SMU_FCLK) {
                /* MCLK takes precedence over FCLK */
                if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
                        return;

                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
        } else if (clk == SMU_SOCCLK) {
                /* MCLK takes precedence over SOCCLK */
                if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
                        return;

                smu->user_dpm_profile.clk_dependency = 0;
                smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
        } else
                /* Add clk dependencies here, if any */
                return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:        smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->adev->in_suspend)
                return;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return;

        /* Enable restore flag */
        smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

        /* set the user dpm power limit */
        if (smu->user_dpm_profile.power_limit) {
                ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to set power limit value\n");
        }

        /* set the user dpm clock configurations */
        if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                enum smu_clk_type clk_type;

                for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
                        /*
                         * Iterate over smu clk type and force the saved user clk
                         * configs, skip if clock dependency is enabled
                         */
                        if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
                                        smu->user_dpm_profile.clk_mask[clk_type]) {
                                ret = smu_force_smuclk_levels(smu, clk_type,
                                                smu->user_dpm_profile.clk_mask[clk_type]);
                                if (ret)
                                        dev_err(smu->adev->dev,
                                                "Failed to set clock type = %d\n", clk_type);
                        }
                }
        }

        /*
         * set the user dpm fan configurations
         * Note: -EOPNOTSUPP merely means fan control is not supported,
         * which is not treated as a failure here.
         */
        if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
            smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
                ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
                if (ret && ret != -EOPNOTSUPP) {
                        smu->user_dpm_profile.fan_speed_pwm = 0;
                        smu->user_dpm_profile.fan_speed_rpm = 0;
                        smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
                        dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
                }

                if (smu->user_dpm_profile.fan_speed_pwm) {
                        ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
                        if (ret && ret != -EOPNOTSUPP)
                                dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
                }

                if (smu->user_dpm_profile.fan_speed_rpm) {
                        ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
                        if (ret && ret != -EOPNOTSUPP)
                                dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
                }
        }

        /* Restore user customized OD settings */
        if (smu->user_dpm_profile.user_od) {
                if (smu->ppt_funcs->restore_user_od_settings) {
                        ret = smu->ppt_funcs->restore_user_od_settings(smu);
                        if (ret)
                                dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
                }
        }

        /* Disable restore flag */
        smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
                                    struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* power states are not supported; expose a single default state */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 1;
        state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

        return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
        /* vega20 is 11.0.2, but it's supported via the powerplay code */
        if (adev->asic_type == CHIP_VEGA20)
                return false;

        if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
                return true;

        return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
                return false;

        return true;
}

static int smu_sys_get_pp_table(void *handle,
                                char **table)
{
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
                                const char *buf,
                                size_t size)
{
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (header->usStructureSize != size) {
                dev_err(smu->adev->dev, "pp table size not matched!\n");
                return -EIO;
        }

        if (!smu_table->hardcode_pptable) {
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
                if (!smu_table->hardcode_pptable)
                        return -ENOMEM;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;

        /*
         * A special hw_fini action (for Navi1x, the DPMs disablement will
         * be skipped) may be needed for custom pptable uploading.
         */
        smu->uploading_custom_pp_table = true;

        ret = smu_reset(smu);
        if (ret)
                dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

        smu->uploading_custom_pp_table = false;

        return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
        int ret = 0;

        /*
         * With SCPM enabled, setting the allowed feature masks (via
         * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
         * That means there is no way to let PMFW know the settings below.
         * Thus, we just assume all features are allowed under such a
         * scenario.
         */
        if (smu->adev->scpm_enabled) {
                bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
                return 0;
        }

        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);

        return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;

        switch (adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
        case IP_VERSION(11, 0, 5):
        case IP_VERSION(11, 0, 9):
                navi10_set_ppt_funcs(smu);
                break;
        case IP_VERSION(11, 0, 7):
        case IP_VERSION(11, 0, 11):
        case IP_VERSION(11, 0, 12):
        case IP_VERSION(11, 0, 13):
                sienna_cichlid_set_ppt_funcs(smu);
                break;
        case IP_VERSION(12, 0, 0):
        case IP_VERSION(12, 0, 1):
                renoir_set_ppt_funcs(smu);
                break;
        case IP_VERSION(11, 5, 0):
                vangogh_set_ppt_funcs(smu);
                break;
        case IP_VERSION(13, 0, 1):
        case IP_VERSION(13, 0, 3):
        case IP_VERSION(13, 0, 8):
                yellow_carp_set_ppt_funcs(smu);
                break;
        case IP_VERSION(13, 0, 4):
        case IP_VERSION(13, 0, 11):
                smu_v13_0_4_set_ppt_funcs(smu);
                break;
        case IP_VERSION(13, 0, 5):
                smu_v13_0_5_set_ppt_funcs(smu);
                break;
        case IP_VERSION(11, 0, 8):
                cyan_skillfish_set_ppt_funcs(smu);
                break;
        case IP_VERSION(11, 0, 2):
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                arcturus_set_ppt_funcs(smu);
                /* OD is not supported on Arcturus */
                smu->od_enabled = false;
                break;
        case IP_VERSION(13, 0, 2):
                aldebaran_set_ppt_funcs(smu);
                /* Enable pp_od_clk_voltage node */
                smu->od_enabled = true;
                break;
        case IP_VERSION(13, 0, 0):
        case IP_VERSION(13, 0, 10):
                smu_v13_0_0_set_ppt_funcs(smu);
                break;
        case IP_VERSION(13, 0, 6):
                smu_v13_0_6_set_ppt_funcs(smu);
                /* Enable pp_od_clk_voltage node */
                smu->od_enabled = true;
                break;
        case IP_VERSION(13, 0, 7):
                smu_v13_0_7_set_ppt_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu;
        int r;

        smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
        if (!smu)
                return -ENOMEM;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
        smu->user_dpm_profile.fan_mode = -1;

        mutex_init(&smu->message_lock);

        adev->powerplay.pp_handle = smu;
        adev->powerplay.pp_funcs = &swsmu_pm_funcs;

        r = smu_set_funcs(adev);
        if (r)
                return r;
        return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int vcn_gate, jpeg_gate;
        int ret = 0;

        if (!smu->ppt_funcs->set_default_dpm_table)
                return 0;

        vcn_gate = atomic_read(&power_gate->vcn_gated);
        jpeg_gate = atomic_read(&power_gate->jpeg_gated);

        ret = smu_dpm_set_vcn_enable(smu, true);
        if (ret)
                return ret;

        ret = smu_dpm_set_jpeg_enable(smu, true);
        if (ret)
                goto err_out;

        ret = smu->ppt_funcs->set_default_dpm_table(smu);
        if (ret)
                dev_err(smu->adev->dev,
                        "Failed to setup default dpm clock tables!\n");

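        /* restore the VCN/JPEG gating state saved on entry */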
        smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
        smu_dpm_set_vcn_enable(smu, !vcn_gate);
        return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        ret = smu_get_default_config_table_settings(smu,
                                                    &adev->pm.config_table);
        if (ret)
                return ret;

        return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        smu_set_fine_grain_gfx_freq_parameters(smu);

        if (!smu->pm_enabled)
                return 0;

        ret = smu_post_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to post smu init!\n");
                return ret;
        }

        /*
         * Explicitly notify PMFW of the power mode the system is in, since
         * PMFW may have booted the ASIC with a different mode.
         * For those supporting ACDC switch via gpio, PMFW will
         * handle the switch automatically. Driver involvement
         * is unnecessary.
         */
        if (!smu->dc_controlled_by_gpio) {
                ret = smu_set_power_source(smu,
                                           adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                                           SMU_POWER_SOURCE_DC);
                if (ret) {
                        dev_err(adev->dev, "Failed to switch to %s mode!\n",
                                adev->pm.ac_power ? "AC" : "DC");
                        return ret;
                }
        }

        if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
            (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
                return 0;

        if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
                ret = smu_set_default_od_settings(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to setup default OD settings!\n");
                        return ret;
                }
        }

        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
                return ret;
        }

        ret = smu_get_asic_power_limits(smu,
                                        &smu->current_power_limit,
                                        &smu->default_power_limit,
                                        &smu->max_power_limit);
        if (ret) {
                dev_err(adev->dev, "Failed to get asic power limits!\n");
                return ret;
        }

        if (!amdgpu_sriov_vf(adev))
                smu_get_unique_id(smu);

        smu_get_fan_parameters(smu);

        smu_handle_task(smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT);

        ret = smu_apply_default_config_table_settings(smu);
        if (ret && (ret != -EOPNOTSUPP)) {
                dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
                return ret;
        }

        smu_restore_dpm_user_profile(smu);

        return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
        uint32_t max_table_size = 0;
        int ret, i;

        /* VRAM allocation for tool table */
        if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[SMU_TABLE_PMSTATUSLOG].size,
                                              tables[SMU_TABLE_PMSTATUSLOG].align,
                                              tables[SMU_TABLE_PMSTATUSLOG].domain,
                                              &tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
                if (ret) {
                        dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
                        return ret;
                }
        }

        /* VRAM allocation for driver table */
        for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;

                if (i == SMU_TABLE_PMSTATUSLOG)
                        continue;

                if (max_table_size < tables[i].size)
                        max_table_size = tables[i].size;
        }

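        /*
         * The driver table is a single staging buffer shared by all SMC
         * table transfers, so it is sized for the largest table.
         * SMU_TABLE_PMSTATUSLOG is skipped above since it gets its own BO.
         */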
        driver_table->size = max_table_size;
        driver_table->align = PAGE_SIZE;
        driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      driver_table->size,
                                      driver_table->align,
                                      driver_table->domain,
                                      &driver_table->bo,
                                      &driver_table->mc_address,
                                      &driver_table->cpu_addr);
        if (ret) {
                dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
                if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
        }

        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);

        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

        amdgpu_bo_free_kernel(&driver_table->bo,
                              &driver_table->mc_address,
                              &driver_table->cpu_addr);

        return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used for SMC internal purposes; the msgs
 * SetSystemVirtualDramAddr and DramLogSetDramAddr notify the SMU
 * of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

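        /*
         * Only the predefined pool sizes are accepted; any other value
         * falls through the switch and is silently ignored (ret stays 0).
         */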
        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                if (ret)
                        dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
                break;
        default:
                break;
        }

        return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return 0;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        if (!dummy_read_1_table->size)
                return 0;

        ret = amdgpu_bo_create_kernel(adev,
                                      dummy_read_1_table->size,
                                      dummy_read_1_table->align,
                                      dummy_read_1_table->domain,
                                      &dummy_read_1_table->bo,
                                      &dummy_read_1_table->mc_address,
                                      &dummy_read_1_table->cpu_addr);
        if (ret)
                dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

        return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;

        amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
                              &dummy_read_1_table->mc_address,
                              &dummy_read_1_table->cpu_addr);

        memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        /*
         * Create smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smc tables!\n");
                return ret;
        }

        /*
         * Create the smu_power_context structure and allocate the
         * smu_dpm_context data it holds.
         */
        ret = smu_init_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu power!\n");
                return ret;
        }

        /*
         * allocate vram bos to store smc table contents.
         */
        ret = smu_init_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_alloc_dummy_read_table(smu);
        if (ret)
                return ret;

        ret = smu_i2c_init(smu);
        if (ret)
                return ret;

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        smu_i2c_fini(smu);

        smu_free_dummy_read_table(smu);

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_fini_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to fini smu power!\n");
                return ret;
        }

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
                return ret;
        }

        return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               throttling_logging_work);

        smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               interrupt_work);

        if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
                smu->ppt_funcs->interrupt_work(smu);
}

static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
        INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
        atomic64_set(&smu->throttle_int_counter, 0);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

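        /* VCN and JPEG start off gated (1 = gated, 0 = ungated) */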
        atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
        atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

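        /*
         * smu was kzalloc()'ed in smu_early_init(), so workload_prority[]
         * is still all zeros here; the bootup-default priority read below
         * is therefore 0, i.e. workload_mask = BIT(0).
         */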
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw init smc table!\n");
                return ret;
        }

        /* get boot_values from vbios to set revision, gfxclk, etc. */
        ret = smu_get_vbios_bootup_values(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
                return ret;
        }

        ret = smu_init_pptable_microcode(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup pptable firmware!\n");
                return ret;
        }

        ret = smu_register_irq_handler(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to register smc irq handler!\n");
                return ret;
        }

        /* If there is no way to query fan control mode, fan control is not supported */
        if (!smu->ppt_funcs->get_fan_control_mode)
                smu->adev->pm.no_fan = true;

        return 0;
}

static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw fini smc table!\n");
                return ret;
        }

        smu_fini_microcode(smu);

        return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_temperature_range *range =
                                &smu->thermal_range;
        int ret = 0;

        if (!smu->ppt_funcs->get_thermal_temperature_range)
                return 0;

        ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
        if (ret)
                return ret;

        adev->pm.dpm.thermal.min_temp = range->min;
        adev->pm.dpm.thermal.max_temp = range->max;
        adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
        adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
        adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
        adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
        adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
        adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
        adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

        return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0;
        uint64_t features_supported;
        int ret = 0;

        switch (adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(11, 0, 7):
        case IP_VERSION(11, 0, 11):
        case IP_VERSION(11, 5, 0):
        case IP_VERSION(11, 0, 12):
                if (adev->in_suspend && smu_is_dpm_running(smu)) {
                        dev_info(adev->dev, "dpm has been enabled\n");
                        ret = smu_system_features_control(smu, true);
                        if (ret)
                                dev_err(adev->dev, "Failed system features control!\n");
                        return ret;
                }
                break;
        default:
                break;
        }

        ret = smu_init_display_count(smu, 0);
        if (ret) {
                dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
                return ret;
        }

        ret = smu_set_driver_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
                return ret;
        }

        /*
         * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
         */
        ret = smu_set_tool_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
                return ret;
        }

        /*
         * The msgs SetSystemVirtualDramAddr and DramLogSetDramAddr are used
         * to notify the SMU of the memory pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
                return ret;
        }

        /*
         * It is assumed the pptable used before runpm is the same as
         * the one used afterwards. Thus, we can reuse the stored
         * copy and do not need to set up the pptable again.
         */
        if (!adev->in_runpm) {
                ret = smu_setup_pptable(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to setup pptable!\n");
                        return ret;
                }
        }

        /* smu_dump_pptable(smu); */

        /*
         * With SCPM enabled, the PSP is responsible for transferring the
         * PPTable to the SMU. Driver involvement is neither needed nor
         * permitted.
         */
        if (!adev->scpm_enabled) {
                /*
                 * Copy pptable bo in the vram to smc with SMU MSGs such as
                 * SetDriverDramAddr and TransferTableDram2Smu.
                 */
                ret = smu_write_pptable(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
                        return ret;
                }
        }

        /* issue Run*Btc msg */
        ret = smu_run_btc(smu);
        if (ret)
                return ret;

        /*
         * With SCPM enabled, these actions (and the relevant messages) are
         * neither needed nor permitted.
         */
        if (!adev->scpm_enabled) {
                ret = smu_feature_set_allowed_mask(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
                        return ret;
                }
        }

        ret = smu_system_features_control(smu, true);
        if (ret) {
                dev_err(adev->dev, "Failed to enable requested dpm features!\n");
                return ret;
        }

        ret = smu_feature_get_enabled_mask(smu, &features_supported);
        if (ret) {
                dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
                return ret;
        }
        bitmap_copy(feature->supported,
                    (unsigned long *)&features_supported,
                    feature->feature_num);

        if (!smu_is_dpm_running(smu))
                dev_info(adev->dev, "dpm has been disabled\n");

        /*
         * Set the initialized values (from vbios) in the dpm tables
         * context, such as gfxclk, memclk, dcefclk, etc., and enable
         * the DPM feature for each type of clock.
         */
        ret = smu_set_default_dpm_table(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
                return ret;
        }

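        /* map the highest supported link speed/width onto the SMU's PCIe DPM parameters */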
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;
        ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
        if (ret) {
                dev_err(adev->dev, "Attempt to override pcie params failed!\n");
                return ret;
        }

        ret = smu_get_thermal_temperature_range(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
                return ret;
        }

        ret = smu_enable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to enable thermal alert!\n");
                return ret;
        }

        ret = smu_notify_display_change(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to notify display change!\n");
                return ret;
        }

        /*
         * Set min deep sleep dce fclk with bootup value from vbios via
         * SetMinDeepSleepDcefclk MSG.
         */
        ret = smu_set_min_dcef_deep_sleep(smu,
                                          smu->smu_table.boot_values.dcefclk / 100);

        return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

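        /*
         * When firmware is not loaded via the PSP, SMU microcode for
         * pre-SMU-v11 parts is loaded directly by the driver here.
         */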
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
                        if (smu->ppt_funcs->load_microcode) {
                                ret = smu->ppt_funcs->load_microcode(smu);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
                if (ret) {
                        dev_err(adev->dev, "SMC is not ready\n");
                        return ret;
                }
        }

        /*
         * Send msg GetDriverIfVersion to check if the return value matches
         * the DRIVER_IF_VERSION in the smc header.
         */
        ret = smu_check_fw_version(smu);
        if (ret)
                return ret;

        return ret;
}

static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
                smu->pm_enabled = false;
                return 0;
        }

        ret = smu_start_smc_engine(smu);
        if (ret) {
                dev_err(adev->dev, "SMC engine is not correctly up!\n");
                return ret;
        }

        if (smu->is_apu) {
                ret = smu_set_gfx_imu_enable(smu);
                if (ret)
                        return ret;
                smu_dpm_set_vcn_enable(smu, true);
                smu_dpm_set_jpeg_enable(smu, true);
                smu_set_gfx_cgpg(smu, true);
        }

        if (!smu->pm_enabled)
                return 0;

        ret = smu_get_driver_allowed_feature_mask(smu);
        if (ret)
                return ret;

        ret = smu_smc_hw_setup(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup smc hw!\n");
                return ret;
        }

        /*
         * Move maximum sustainable clock retrieving here considering
         * 1. It is not needed on resume (from S3).
         * 2. DAL settings come between .hw_init and .late_init of SMU.
         *    And DAL needs to know the maximum sustainable clocks. Thus
         *    it cannot be put in .late_init().
         */
        ret = smu_init_max_sustainable_clocks(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
                return ret;
        }

        adev->pm.dpm_enabled = true;

        dev_info(adev->dev, "SMU is initialized successfully!\n");

        return 0;
}

1476 static int smu_disable_dpms(struct smu_context *smu)
1477 {
1478         struct amdgpu_device *adev = smu->adev;
1479         int ret = 0;
1480         bool use_baco = !smu->is_apu &&
1481                 ((amdgpu_in_reset(adev) &&
1482                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1483                  ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1484
1485         /*
1486          * For SMU 13.0.0, 13.0.7 and 13.0.10, PMFW handles the DPM features (disablement or others)
1487          * properly on suspend/reset/unload. Driver involvement may cause unexpected issues.
1488          */
1489         switch (adev->ip_versions[MP1_HWIP][0]) {
1490         case IP_VERSION(13, 0, 0):
1491         case IP_VERSION(13, 0, 7):
1492         case IP_VERSION(13, 0, 10):
1493                 return 0;
1494         default:
1495                 break;
1496         }
1497
1498         /*
1499          * For custom pptable uploading, skip the DPM features
1500          * disable process on Navi1x ASICs.
1501          *   - As the gfx related features are under control of
1502          *     RLC on those ASICs. RLC reinitialization will be
1503          *     needed to reenable them. That will cost much more
1504          *     efforts.
1505          *
1506          *   - SMU firmware can handle the DPM reenablement
1507          *     properly.
1508          */
1509         if (smu->uploading_custom_pp_table) {
1510                 switch (adev->ip_versions[MP1_HWIP][0]) {
1511                 case IP_VERSION(11, 0, 0):
1512                 case IP_VERSION(11, 0, 5):
1513                 case IP_VERSION(11, 0, 9):
1514                 case IP_VERSION(11, 0, 7):
1515                 case IP_VERSION(11, 0, 11):
1516                 case IP_VERSION(11, 5, 0):
1517                 case IP_VERSION(11, 0, 12):
1518                 case IP_VERSION(11, 0, 13):
1519                         return 0;
1520                 default:
1521                         break;
1522                 }
1523         }
1524
1525         /*
1526          * For the ASICs below, PMFW handles the feature disablement properly
1527          * on BACO entry. Driver involvement is unnecessary.
1528          */
1529         if (use_baco) {
1530                 switch (adev->ip_versions[MP1_HWIP][0]) {
1531                 case IP_VERSION(11, 0, 7):
1532                 case IP_VERSION(11, 0, 0):
1533                 case IP_VERSION(11, 0, 5):
1534                 case IP_VERSION(11, 0, 9):
1535                 case IP_VERSION(13, 0, 7):
1536                         return 0;
1537                 default:
1538                         break;
1539                 }
1540         }
1541
1542         /*
1543          * For SMU 13.0.4 and 13.0.11, PMFW handles the feature disablement
1544          * properly in the gpu reset case. Driver involvement is unnecessary.
1545          */
1546         if (amdgpu_in_reset(adev)) {
1547                 switch (adev->ip_versions[MP1_HWIP][0]) {
1548                 case IP_VERSION(13, 0, 4):
1549                 case IP_VERSION(13, 0, 11):
1550                         return 0;
1551                 default:
1552                         break;
1553                 }
1554         }
1555
1556         /*
1557          * For gpu reset, runpm and hibernation through BACO,
1558          * BACO feature has to be kept enabled.
1559          */
1560         if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1561                 ret = smu_disable_all_features_with_exception(smu,
1562                                                               SMU_FEATURE_BACO_BIT);
1563                 if (ret)
1564                         dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1565         } else {
1566                 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1567                 if (!adev->scpm_enabled) {
1568                         ret = smu_system_features_control(smu, false);
1569                         if (ret)
1570                                 dev_err(adev->dev, "Failed to disable smu features.\n");
1571                 }
1572         }
1573
1574         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
1575             !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1576                 adev->gfx.rlc.funcs->stop(adev);
1577
1578         return ret;
1579 }
1580
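     /*
      * Common teardown for the hw_fini and suspend paths: flush the
      * deferred throttling-logging and interrupt work, then disable
      * thermal alerts and the DPM features.
      */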
1581 static int smu_smc_hw_cleanup(struct smu_context *smu)
1582 {
1583         struct amdgpu_device *adev = smu->adev;
1584         int ret = 0;
1585
1586         cancel_work_sync(&smu->throttling_logging_work);
1587         cancel_work_sync(&smu->interrupt_work);
1588
1589         ret = smu_disable_thermal_alert(smu);
1590         if (ret) {
1591                 dev_err(adev->dev, "Fail to disable thermal alert!\n");
1592                 return ret;
1593         }
1594
1595         ret = smu_disable_dpms(smu);
1596         if (ret) {
1597                 dev_err(adev->dev, "Fail to disable dpm features!\n");
1598                 return ret;
1599         }
1600
1601         return 0;
1602 }
1603
1604 static int smu_hw_fini(void *handle)
1605 {
1606         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1607         struct smu_context *smu = adev->powerplay.pp_handle;
1608
1609         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1610                 return 0;
1611
1612         smu_dpm_set_vcn_enable(smu, false);
1613         smu_dpm_set_jpeg_enable(smu, false);
1614
1615         adev->vcn.cur_state = AMD_PG_STATE_GATE;
1616         adev->jpeg.cur_state = AMD_PG_STATE_GATE;
1617
1618         if (!smu->pm_enabled)
1619                 return 0;
1620
1621         adev->pm.dpm_enabled = false;
1622
1623         return smu_smc_hw_cleanup(smu);
1624 }
1625
1626 static void smu_late_fini(void *handle)
1627 {
1628         struct amdgpu_device *adev = handle;
1629         struct smu_context *smu = adev->powerplay.pp_handle;
1630
1631         kfree(smu);
1632 }
1633
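     /*
      * Reinitialize the SMU from scratch by replaying the
      * hw_fini -> hw_init -> late_init sequence.
      */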
1634 static int smu_reset(struct smu_context *smu)
1635 {
1636         struct amdgpu_device *adev = smu->adev;
1637         int ret;
1638
1639         ret = smu_hw_fini(adev);
1640         if (ret)
1641                 return ret;
1642
1643         ret = smu_hw_init(adev);
1644         if (ret)
1645                 return ret;
1646
1647         ret = smu_late_init(adev);
1648         if (ret)
1649                 return ret;
1650
1651         return 0;
1652 }
1653
1654 static int smu_suspend(void *handle)
1655 {
1656         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657         struct smu_context *smu = adev->powerplay.pp_handle;
1658         int ret;
1659         uint64_t count;
1660
1661         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1662                 return 0;
1663
1664         if (!smu->pm_enabled)
1665                 return 0;
1666
1667         adev->pm.dpm_enabled = false;
1668
1669         ret = smu_smc_hw_cleanup(smu);
1670         if (ret)
1671                 return ret;
1672
1673         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1674
1675         smu_set_gfx_cgpg(smu, false);
1676
1677         /*
1678          * PMFW resets the gfxoff entry count when the device is suspended, so
1679          * save the last value here to restore on resume and keep it consistent.
1680          */
1681         ret = smu_get_entrycount_gfxoff(smu, &count);
1682         if (!ret)
1683                 adev->gfx.gfx_off_entrycount = count;
1684
1685         return 0;
1686 }
1687
1688 static int smu_resume(void *handle)
1689 {
1690         int ret;
1691         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1692         struct smu_context *smu = adev->powerplay.pp_handle;
1693
1694         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1695                 return 0;
1696
1697         if (!smu->pm_enabled)
1698                 return 0;
1699
1700         dev_info(adev->dev, "SMU is resuming...\n");
1701
1702         ret = smu_start_smc_engine(smu);
1703         if (ret) {
1704                 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1705                 return ret;
1706         }
1707
1708         ret = smu_smc_hw_setup(smu);
1709         if (ret) {
1710                 dev_err(adev->dev, "Failed to setup smc hw!\n");
1711                 return ret;
1712         }
1713
1714         ret = smu_set_gfx_imu_enable(smu);
1715         if (ret)
1716                 return ret;
1717
1718         smu_set_gfx_cgpg(smu, true);
1719
1720         smu->disable_uclk_switch = 0;
1721
1722         adev->pm.dpm_enabled = true;
1723
1724         dev_info(adev->dev, "SMU is resumed successfully!\n");
1725
1726         return 0;
1727 }
1728
1729 static int smu_display_configuration_change(void *handle,
1730                                             const struct amd_pp_display_configuration *display_config)
1731 {
1732         struct smu_context *smu = handle;
1733
1734         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1735                 return -EOPNOTSUPP;
1736
1737         if (!display_config)
1738                 return -EINVAL;
1739
1740         smu_set_min_dcef_deep_sleep(smu,
1741                                     display_config->min_dcef_deep_sleep_set_clk / 100);
1742
1743         return 0;
1744 }
1745
1746 static int smu_set_clockgating_state(void *handle,
1747                                      enum amd_clockgating_state state)
1748 {
1749         return 0;
1750 }
1751
1752 static int smu_set_powergating_state(void *handle,
1753                                      enum amd_powergating_state state)
1754 {
1755         return 0;
1756 }
1757
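     /*
      * Entering a UMD (profiling) pstate saves the current DPM level and
      * turns off the gfx power-saving features (GPO, ULV, deep sleep) so
      * that readings stay stable; exiting restores the saved level and
      * turns those features back on.
      */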
1758 static int smu_enable_umd_pstate(void *handle,
1759                       enum amd_dpm_forced_level *level)
1760 {
1761         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1762                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1763                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1764                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1765
1766         struct smu_context *smu = (struct smu_context *)handle;
1767         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1768
1769         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1770                 return -EINVAL;
1771
1772         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1773                 /* enter umd pstate, save current level, disable gfx cg */
1774                 if (*level & profile_mode_mask) {
1775                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1776                         smu_gpo_control(smu, false);
1777                         smu_gfx_ulv_control(smu, false);
1778                         smu_deep_sleep_control(smu, false);
1779                         amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1780                 }
1781         } else {
1782                 /* exit umd pstate, restore level, enable gfx cg */
1783                 if (!(*level & profile_mode_mask)) {
1784                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1785                                 *level = smu_dpm_ctx->saved_dpm_level;
1786                         amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1787                         smu_deep_sleep_control(smu, true);
1788                         smu_gfx_ulv_control(smu, true);
1789                         smu_gpo_control(smu, true);
1790                 }
1791         }
1792
1793         return 0;
1794 }
1795
1796 static int smu_bump_power_profile_mode(struct smu_context *smu,
1797                                            long *param,
1798                                            uint32_t param_size)
1799 {
1800         int ret = 0;
1801
1802         if (smu->ppt_funcs->set_power_profile_mode)
1803                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
1804
1805         return ret;
1806 }
1807
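     /*
      * Re-evaluate the dynamic power state: propagate any display
      * configuration change, apply the clock adjustment rules, switch the
      * performance level if it changed, and re-apply the highest-priority
      * workload profile unless manual/deterministic mode is in force.
      */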
1808 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1809                                    enum amd_dpm_forced_level level,
1810                                    bool skip_display_settings)
1811 {
1812         int ret = 0;
1813         int index = 0;
1814         long workload;
1815         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1816
1817         if (!skip_display_settings) {
1818                 ret = smu_display_config_changed(smu);
1819                 if (ret) {
1820                         dev_err(smu->adev->dev, "Failed to change display config!");
1821                         return ret;
1822                 }
1823         }
1824
1825         ret = smu_apply_clocks_adjust_rules(smu);
1826         if (ret) {
1827                 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1828                 return ret;
1829         }
1830
1831         if (!skip_display_settings) {
1832                 ret = smu_notify_smc_display_config(smu);
1833                 if (ret) {
1834                         dev_err(smu->adev->dev, "Failed to notify smc display config!");
1835                         return ret;
1836                 }
1837         }
1838
1839         if (smu_dpm_ctx->dpm_level != level) {
1840                 ret = smu_asic_set_performance_level(smu, level);
1841                 if (ret) {
1842                         dev_err(smu->adev->dev, "Failed to set performance level!");
1843                         return ret;
1844                 }
1845
1846                 /* update the saved copy */
1847                 smu_dpm_ctx->dpm_level = level;
1848         }
1849
1850         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1851                 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
1852                 index = fls(smu->workload_mask);
1853                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1854                 workload = smu->workload_setting[index];
1855
1856                 if (smu->power_profile_mode != workload)
1857                         smu_bump_power_profile_mode(smu, &workload, 0);
1858         }
1859
1860         return ret;
1861 }
1862
1863 static int smu_handle_task(struct smu_context *smu,
1864                            enum amd_dpm_forced_level level,
1865                            enum amd_pp_task task_id)
1866 {
1867         int ret = 0;
1868
1869         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1870                 return -EOPNOTSUPP;
1871
1872         switch (task_id) {
1873         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1874                 ret = smu_pre_display_config_changed(smu);
1875                 if (ret)
1876                         return ret;
1877                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1878                 break;
1879         case AMD_PP_TASK_COMPLETE_INIT:
1880         case AMD_PP_TASK_READJUST_POWER_STATE:
1881                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1882                 break;
1883         default:
1884                 break;
1885         }
1886
1887         return ret;
1888 }
1889
1890 static int smu_handle_dpm_task(void *handle,
1891                                enum amd_pp_task task_id,
1892                                enum amd_pm_state_type *user_state)
1893 {
1894         struct smu_context *smu = handle;
1895         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1896
1897         return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
1899 }
1900
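     /*
      * Enable or disable a single workload profile. Active profiles are
      * tracked as bits in workload_mask, positioned by per-profile
      * priority; the highest-priority bit still set determines the
      * profile mode actually pushed to the firmware.
      */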
1901 static int smu_switch_power_profile(void *handle,
1902                                     enum PP_SMC_POWER_PROFILE type,
1903                                     bool en)
1904 {
1905         struct smu_context *smu = handle;
1906         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1907         long workload;
1908         uint32_t index;
1909
1910         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1911                 return -EOPNOTSUPP;
1912
1913         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1914                 return -EINVAL;
1915
1916         if (!en) {
1917                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1918                 index = fls(smu->workload_mask);
1919                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1920                 workload = smu->workload_setting[index];
1921         } else {
1922                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1923                 index = fls(smu->workload_mask);
1924                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1925                 workload = smu->workload_setting[index];
1926         }
1927
1928         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1929                 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
1930                 smu_bump_power_profile_mode(smu, &workload, 0);
1931
1932         return 0;
1933 }
1934
1935 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
1936 {
1937         struct smu_context *smu = handle;
1938         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1939
1940         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1941                 return -EOPNOTSUPP;
1942
1943         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1944                 return -EINVAL;
1945
1946         return smu_dpm_ctx->dpm_level;
1947 }
1948
1949 static int smu_force_performance_level(void *handle,
1950                                        enum amd_dpm_forced_level level)
1951 {
1952         struct smu_context *smu = handle;
1953         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1954         int ret = 0;
1955
1956         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1957                 return -EOPNOTSUPP;
1958
1959         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1960                 return -EINVAL;
1961
1962         ret = smu_enable_umd_pstate(smu, &level);
1963         if (ret)
1964                 return ret;
1965
1966         ret = smu_handle_task(smu, level,
1967                               AMD_PP_TASK_READJUST_POWER_STATE);
1968
1969         /* reset user dpm clock state */
1970         if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1971                 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
1972                 smu->user_dpm_profile.clk_dependency = 0;
1973         }
1974
1975         return ret;
1976 }
1977
1978 static int smu_set_display_count(void *handle, uint32_t count)
1979 {
1980         struct smu_context *smu = handle;
1981
1982         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1983                 return -EOPNOTSUPP;
1984
1985         return smu_init_display_count(smu, count);
1986 }
1987
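     /*
      * Force a clock to a subset of its DPM levels, given as a bitmask
      * (e.g. a mask of 0x6 would restrict the clock to levels 1 and 2).
      * This is only honored in manual performance level mode; the mask is
      * also recorded in the user profile so it can be restored later.
      */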
1988 static int smu_force_smuclk_levels(struct smu_context *smu,
1989                          enum smu_clk_type clk_type,
1990                          uint32_t mask)
1991 {
1992         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1993         int ret = 0;
1994
1995         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1996                 return -EOPNOTSUPP;
1997
1998         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1999                 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2000                 return -EINVAL;
2001         }
2002
2003         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2004                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2005                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2006                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
2007                         smu_set_user_clk_dependencies(smu, clk_type);
2008                 }
2009         }
2010
2011         return ret;
2012 }
2013
2014 static int smu_force_ppclk_levels(void *handle,
2015                                   enum pp_clock_type type,
2016                                   uint32_t mask)
2017 {
2018         struct smu_context *smu = handle;
2019         enum smu_clk_type clk_type;
2020
2021         switch (type) {
2022         case PP_SCLK:
2023                 clk_type = SMU_SCLK; break;
2024         case PP_MCLK:
2025                 clk_type = SMU_MCLK; break;
2026         case PP_PCIE:
2027                 clk_type = SMU_PCIE; break;
2028         case PP_SOCCLK:
2029                 clk_type = SMU_SOCCLK; break;
2030         case PP_FCLK:
2031                 clk_type = SMU_FCLK; break;
2032         case PP_DCEFCLK:
2033                 clk_type = SMU_DCEFCLK; break;
2034         case PP_VCLK:
2035                 clk_type = SMU_VCLK; break;
2036         case PP_VCLK1:
2037                 clk_type = SMU_VCLK1; break;
2038         case PP_DCLK:
2039                 clk_type = SMU_DCLK; break;
2040         case PP_DCLK1:
2041                 clk_type = SMU_DCLK1; break;
2042         case OD_SCLK:
2043                 clk_type = SMU_OD_SCLK; break;
2044         case OD_MCLK:
2045                 clk_type = SMU_OD_MCLK; break;
2046         case OD_VDDC_CURVE:
2047                 clk_type = SMU_OD_VDDC_CURVE; break;
2048         case OD_RANGE:
2049                 clk_type = SMU_OD_RANGE; break;
2050         default:
2051                 return -EINVAL;
2052         }
2053
2054         return smu_force_smuclk_levels(smu, clk_type, mask);
2055 }
2056
2057 /*
2058  * On system suspend or reset, the dpm_enabled flag is cleared
2059  * so that the SMU services which are no longer available get
2060  * gated.
2061  * The mp1 state setting, however, should still be honored even
2062  * with dpm_enabled cleared.
2063  */
2064 static int smu_set_mp1_state(void *handle,
2065                              enum pp_mp1_state mp1_state)
2066 {
2067         struct smu_context *smu = handle;
2068         int ret = 0;
2069
2070         if (!smu->pm_enabled)
2071                 return -EOPNOTSUPP;
2072
2073         if (smu->ppt_funcs &&
2074             smu->ppt_funcs->set_mp1_state)
2075                 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2076
2077         return ret;
2078 }
2079
2080 static int smu_set_df_cstate(void *handle,
2081                              enum pp_df_cstate state)
2082 {
2083         struct smu_context *smu = handle;
2084         int ret = 0;
2085
2086         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2087                 return -EOPNOTSUPP;
2088
2089         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2090                 return 0;
2091
2092         ret = smu->ppt_funcs->set_df_cstate(smu, state);
2093         if (ret)
2094                 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2095
2096         return ret;
2097 }
2098
2099 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2100 {
2101         int ret = 0;
2102
2103         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2104                 return -EOPNOTSUPP;
2105
2106         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2107                 return 0;
2108
2109         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2110         if (ret)
2111                 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
2112
2113         return ret;
2114 }
2115
2116 int smu_write_watermarks_table(struct smu_context *smu)
2117 {
2118         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2119                 return -EOPNOTSUPP;
2120
2121         return smu_set_watermarks_table(smu, NULL);
2122 }
2123
2124 static int smu_set_watermarks_for_clock_ranges(void *handle,
2125                                                struct pp_smu_wm_range_sets *clock_ranges)
2126 {
2127         struct smu_context *smu = handle;
2128
2129         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2130                 return -EOPNOTSUPP;
2131
2132         if (smu->disable_watermark)
2133                 return 0;
2134
2135         return smu_set_watermarks_table(smu, clock_ranges);
2136 }
2137
2138 int smu_set_ac_dc(struct smu_context *smu)
2139 {
2140         int ret = 0;
2141
2142         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2143                 return -EOPNOTSUPP;
2144
2145         /* controlled by firmware */
2146         if (smu->dc_controlled_by_gpio)
2147                 return 0;
2148
2149         ret = smu_set_power_source(smu,
2150                                    smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2151                                    SMU_POWER_SOURCE_DC);
2152         if (ret)
2153                 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2154                        smu->adev->pm.ac_power ? "AC" : "DC");
2155
2156         return ret;
2157 }
2158
2159 const struct amd_ip_funcs smu_ip_funcs = {
2160         .name = "smu",
2161         .early_init = smu_early_init,
2162         .late_init = smu_late_init,
2163         .sw_init = smu_sw_init,
2164         .sw_fini = smu_sw_fini,
2165         .hw_init = smu_hw_init,
2166         .hw_fini = smu_hw_fini,
2167         .late_fini = smu_late_fini,
2168         .suspend = smu_suspend,
2169         .resume = smu_resume,
2170         .is_idle = NULL,
2171         .check_soft_reset = NULL,
2172         .wait_for_idle = NULL,
2173         .soft_reset = NULL,
2174         .set_clockgating_state = smu_set_clockgating_state,
2175         .set_powergating_state = smu_set_powergating_state,
2176 };
2177
2178 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2179 {
2180         .type = AMD_IP_BLOCK_TYPE_SMC,
2181         .major = 11,
2182         .minor = 0,
2183         .rev = 0,
2184         .funcs = &smu_ip_funcs,
2185 };
2186
2187 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2188 {
2189         .type = AMD_IP_BLOCK_TYPE_SMC,
2190         .major = 12,
2191         .minor = 0,
2192         .rev = 0,
2193         .funcs = &smu_ip_funcs,
2194 };
2195
2196 const struct amdgpu_ip_block_version smu_v13_0_ip_block =
2197 {
2198         .type = AMD_IP_BLOCK_TYPE_SMC,
2199         .major = 13,
2200         .minor = 0,
2201         .rev = 0,
2202         .funcs = &smu_ip_funcs,
2203 };
2204
2205 static int smu_load_microcode(void *handle)
2206 {
2207         struct smu_context *smu = handle;
2208         struct amdgpu_device *adev = smu->adev;
2209         int ret = 0;
2210
2211         if (!smu->pm_enabled)
2212                 return -EOPNOTSUPP;
2213
2214         /* This path is only used when the firmware is not loaded via PSP */
2215         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2216                 return 0;
2217
2218         if (smu->ppt_funcs->load_microcode) {
2219                 ret = smu->ppt_funcs->load_microcode(smu);
2220                 if (ret) {
2221                         dev_err(adev->dev, "Load microcode failed\n");
2222                         return ret;
2223                 }
2224         }
2225
2226         if (smu->ppt_funcs->check_fw_status) {
2227                 ret = smu->ppt_funcs->check_fw_status(smu);
2228                 if (ret) {
2229                         dev_err(adev->dev, "SMC is not ready\n");
2230                         return ret;
2231                 }
2232         }
2233
2234         return ret;
2235 }
2236
2237 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2238 {
2239         int ret = 0;
2240
2241         if (smu->ppt_funcs->set_gfx_cgpg)
2242                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2243
2244         return ret;
2245 }
2246
2247 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2248 {
2249         struct smu_context *smu = handle;
2250         int ret = 0;
2251
2252         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2253                 return -EOPNOTSUPP;
2254
2255         if (!smu->ppt_funcs->set_fan_speed_rpm)
2256                 return -EOPNOTSUPP;
2257
2258         if (speed == U32_MAX)
2259                 return -EINVAL;
2260
2261         ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2262         if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2263                 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2264                 smu->user_dpm_profile.fan_speed_rpm = speed;
2265
2266                 /* Override any custom PWM setting, as the two cannot co-exist */
2267                 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2268                 smu->user_dpm_profile.fan_speed_pwm = 0;
2269         }
2270
2271         return ret;
2272 }
2273
2274 /**
2275  * smu_get_power_limit - Request one of the SMU Power Limits
2276  *
2277  * @handle: pointer to smu context
2278  * @limit: requested limit is written back to this variable
2279  * @pp_limit_level: &pp_power_limit_level selecting which of the power limits to return
2280  * @pp_power_type: &pp_power_type type of the power limit to query
2281  * Return:  0 on success, <0 on error
2282  *
2283  */
2284 int smu_get_power_limit(void *handle,
2285                         uint32_t *limit,
2286                         enum pp_power_limit_level pp_limit_level,
2287                         enum pp_power_type pp_power_type)
2288 {
2289         struct smu_context *smu = handle;
2290         struct amdgpu_device *adev = smu->adev;
2291         enum smu_ppt_limit_level limit_level;
2292         uint32_t limit_type;
2293         int ret = 0;
2294
2295         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2296                 return -EOPNOTSUPP;
2297
2298         switch (pp_power_type) {
2299         case PP_PWR_TYPE_SUSTAINED:
2300                 limit_type = SMU_DEFAULT_PPT_LIMIT;
2301                 break;
2302         case PP_PWR_TYPE_FAST:
2303                 limit_type = SMU_FAST_PPT_LIMIT;
2304                 break;
2305         default:
2306                 return -EOPNOTSUPP;
2308         }
2309
2310         switch (pp_limit_level) {
2311         case PP_PWR_LIMIT_CURRENT:
2312                 limit_level = SMU_PPT_LIMIT_CURRENT;
2313                 break;
2314         case PP_PWR_LIMIT_DEFAULT:
2315                 limit_level = SMU_PPT_LIMIT_DEFAULT;
2316                 break;
2317         case PP_PWR_LIMIT_MAX:
2318                 limit_level = SMU_PPT_LIMIT_MAX;
2319                 break;
2320         case PP_PWR_LIMIT_MIN:
2321         default:
2322                 return -EOPNOTSUPP;
2324         }
2325
2326         if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2327                 if (smu->ppt_funcs->get_ppt_limit)
2328                         ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2329         } else {
2330                 switch (limit_level) {
2331                 case SMU_PPT_LIMIT_CURRENT:
2332                         switch (adev->ip_versions[MP1_HWIP][0]) {
2333                         case IP_VERSION(13, 0, 2):
2334                         case IP_VERSION(11, 0, 7):
2335                         case IP_VERSION(11, 0, 11):
2336                         case IP_VERSION(11, 0, 12):
2337                         case IP_VERSION(11, 0, 13):
2338                                 ret = smu_get_asic_power_limits(smu,
2339                                                                 &smu->current_power_limit,
2340                                                                 NULL,
2341                                                                 NULL);
2342                                 break;
2343                         default:
2344                                 break;
2345                         }
2346                         *limit = smu->current_power_limit;
2347                         break;
2348                 case SMU_PPT_LIMIT_DEFAULT:
2349                         *limit = smu->default_power_limit;
2350                         break;
2351                 case SMU_PPT_LIMIT_MAX:
2352                         *limit = smu->max_power_limit;
2353                         break;
2354                 default:
2355                         break;
2356                 }
2357         }
2358
2359         return ret;
2360 }
2361
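     /*
      * The @limit argument packs the limit type into its top byte and the
      * limit value into the lower 24 bits; as an illustration, a fast PPT
      * limit of 220W would be passed in as (SMU_FAST_PPT_LIMIT << 24) | 220
      * (assuming the ASIC reports its limits in watts).
      */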
2362 static int smu_set_power_limit(void *handle, uint32_t limit)
2363 {
2364         struct smu_context *smu = handle;
2365         uint32_t limit_type = limit >> 24;
2366         int ret = 0;
2367
2368         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2369                 return -EOPNOTSUPP;
2370
2371         limit &= (1 << 24) - 1;
2372         if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2373                 if (smu->ppt_funcs->set_power_limit)
2374                         return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2375
2376         if (limit > smu->max_power_limit) {
2377                 dev_err(smu->adev->dev,
2378                         "New power limit (%d) is over the max allowed %d\n",
2379                         limit, smu->max_power_limit);
2380                 return -EINVAL;
2381         }
2382
2383         if (!limit)
2384                 limit = smu->current_power_limit;
2385
2386         if (smu->ppt_funcs->set_power_limit) {
2387                 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2388                 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2389                         smu->user_dpm_profile.power_limit = limit;
2390         }
2391
2392         return ret;
2393 }
2394
2395 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2396 {
2397         int ret = 0;
2398
2399         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2400                 return -EOPNOTSUPP;
2401
2402         if (smu->ppt_funcs->print_clk_levels)
2403                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2404
2405         return ret;
2406 }
2407
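     /*
      * Map a sysfs-facing pp_clock_type onto the internal smu_clk_type;
      * SMU_CLK_COUNT is returned for types the SMU layer cannot handle.
      */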
2408 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2409 {
2410         enum smu_clk_type clk_type;
2411
2412         switch (type) {
2413         case PP_SCLK:
2414                 clk_type = SMU_SCLK; break;
2415         case PP_MCLK:
2416                 clk_type = SMU_MCLK; break;
2417         case PP_PCIE:
2418                 clk_type = SMU_PCIE; break;
2419         case PP_SOCCLK:
2420                 clk_type = SMU_SOCCLK; break;
2421         case PP_FCLK:
2422                 clk_type = SMU_FCLK; break;
2423         case PP_DCEFCLK:
2424                 clk_type = SMU_DCEFCLK; break;
2425         case PP_VCLK:
2426                 clk_type = SMU_VCLK; break;
2427         case PP_VCLK1:
2428                 clk_type = SMU_VCLK1; break;
2429         case PP_DCLK:
2430                 clk_type = SMU_DCLK; break;
2431         case PP_DCLK1:
2432                 clk_type = SMU_DCLK1; break;
2433         case OD_SCLK:
2434                 clk_type = SMU_OD_SCLK; break;
2435         case OD_MCLK:
2436                 clk_type = SMU_OD_MCLK; break;
2437         case OD_VDDC_CURVE:
2438                 clk_type = SMU_OD_VDDC_CURVE; break;
2439         case OD_RANGE:
2440                 clk_type = SMU_OD_RANGE; break;
2441         case OD_VDDGFX_OFFSET:
2442                 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2443         case OD_CCLK:
2444                 clk_type = SMU_OD_CCLK; break;
2445         default:
2446                 clk_type = SMU_CLK_COUNT; break;
2447         }
2448
2449         return clk_type;
2450 }
2451
2452 static int smu_print_ppclk_levels(void *handle,
2453                                   enum pp_clock_type type,
2454                                   char *buf)
2455 {
2456         struct smu_context *smu = handle;
2457         enum smu_clk_type clk_type;
2458
2459         clk_type = smu_convert_to_smuclk(type);
2460         if (clk_type == SMU_CLK_COUNT)
2461                 return -EINVAL;
2462
2463         return smu_print_smuclk_levels(smu, clk_type, buf);
2464 }
2465
2466 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2467 {
2468         struct smu_context *smu = handle;
2469         enum smu_clk_type clk_type;
2470
2471         clk_type = smu_convert_to_smuclk(type);
2472         if (clk_type == SMU_CLK_COUNT)
2473                 return -EINVAL;
2474
2475         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2476                 return -EOPNOTSUPP;
2477
2478         if (!smu->ppt_funcs->emit_clk_levels)
2479                 return -ENOENT;
2480
2481         return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2483 }
2484
2485 static int smu_od_edit_dpm_table(void *handle,
2486                                  enum PP_OD_DPM_TABLE_COMMAND type,
2487                                  long *input, uint32_t size)
2488 {
2489         struct smu_context *smu = handle;
2490         int ret = 0;
2491
2492         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2493                 return -EOPNOTSUPP;
2494
2495         if (smu->ppt_funcs->od_edit_dpm_table)
2496                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2498
2499         return ret;
2500 }
2501
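     /*
      * Read a single sensor. The ASIC-specific read_sensor callback gets
      * the first chance; the generic pstate/feature-mask sensors below
      * serve as the fallback. On success the value and its size in bytes
      * are written back through @data and @size_arg.
      */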
2502 static int smu_read_sensor(void *handle,
2503                            int sensor,
2504                            void *data,
2505                            int *size_arg)
2506 {
2507         struct smu_context *smu = handle;
2508         struct smu_umd_pstate_table *pstate_table =
2509                                 &smu->pstate_table;
2510         int ret = 0;
2511         uint32_t *size, size_val;
2512
2513         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2514                 return -EOPNOTSUPP;
2515
2516         if (!data || !size_arg)
2517                 return -EINVAL;
2518
2519         size_val = *size_arg;
2520         size = &size_val;
2521
2522         if (smu->ppt_funcs->read_sensor)
2523                 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2524                         goto out;
2525
2526         switch (sensor) {
2527         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2528                 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2529                 *size = 4;
2530                 break;
2531         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2532                 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2533                 *size = 4;
2534                 break;
2535         case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2536                 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2537                 *size = 4;
2538                 break;
2539         case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2540                 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2541                 *size = 4;
2542                 break;
2543         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2544                 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2545                 *size = 8;
2546                 break;
2547         case AMDGPU_PP_SENSOR_UVD_POWER:
2548                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2549                 *size = 4;
2550                 break;
2551         case AMDGPU_PP_SENSOR_VCE_POWER:
2552                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2553                 *size = 4;
2554                 break;
2555         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2556                 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2557                 *size = 4;
2558                 break;
2559         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2560                 *(uint32_t *)data = 0;
2561                 *size = 4;
2562                 break;
2563         default:
2564                 *size = 0;
2565                 ret = -EOPNOTSUPP;
2566                 break;
2567         }
2568
2569 out:
2570         /* copy the uint32_t size back into the caller's int-sized argument */
2571         *size_arg = size_val;
2572
2573         return ret;
2574 }
2575
2576 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
2577 {
2578         int ret = -EINVAL;
2579         struct smu_context *smu = handle;
2580
2581         if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2582                 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2583
2584         return ret;
2585 }
2586
2587 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
2588 {
2589         int ret = -EINVAL;
2590         struct smu_context *smu = handle;
2591
2592         if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2593                 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2594
2595         return ret;
2596 }
2597
2598 static int smu_get_power_profile_mode(void *handle, char *buf)
2599 {
2600         struct smu_context *smu = handle;
2601
2602         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2603             !smu->ppt_funcs->get_power_profile_mode)
2604                 return -EOPNOTSUPP;
2605         if (!buf)
2606                 return -EINVAL;
2607
2608         return smu->ppt_funcs->get_power_profile_mode(smu, buf);
2609 }
2610
2611 static int smu_set_power_profile_mode(void *handle,
2612                                       long *param,
2613                                       uint32_t param_size)
2614 {
2615         struct smu_context *smu = handle;
2616
2617         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2618             !smu->ppt_funcs->set_power_profile_mode)
2619                 return -EOPNOTSUPP;
2620
2621         return smu_bump_power_profile_mode(smu, param, param_size);
2622 }
2623
2624 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
2625 {
2626         struct smu_context *smu = handle;
2627
2628         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2629                 return -EOPNOTSUPP;
2630
2631         if (!smu->ppt_funcs->get_fan_control_mode)
2632                 return -EOPNOTSUPP;
2633
2634         if (!fan_mode)
2635                 return -EINVAL;
2636
2637         *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
2638
2639         return 0;
2640 }
2641
2642 static int smu_set_fan_control_mode(void *handle, u32 value)
2643 {
2644         struct smu_context *smu = handle;
2645         int ret = 0;
2646
2647         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2648                 return -EOPNOTSUPP;
2649
2650         if (!smu->ppt_funcs->set_fan_control_mode)
2651                 return -EOPNOTSUPP;
2652
2653         if (value == U32_MAX)
2654                 return -EINVAL;
2655
2656         ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2657         if (ret)
2658                 goto out;
2659
2660         if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2661                 smu->user_dpm_profile.fan_mode = value;
2662
2663                 /* reset user dpm fan speed */
2664                 if (value != AMD_FAN_CTRL_MANUAL) {
2665                         smu->user_dpm_profile.fan_speed_pwm = 0;
2666                         smu->user_dpm_profile.fan_speed_rpm = 0;
2667                         smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
2668                 }
2669         }
2670
2671 out:
2672         return ret;
2673 }
2674
2675 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
2676 {
2677         struct smu_context *smu = handle;
2678         int ret = 0;
2679
2680         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2681                 return -EOPNOTSUPP;
2682
2683         if (!smu->ppt_funcs->get_fan_speed_pwm)
2684                 return -EOPNOTSUPP;
2685
2686         if (!speed)
2687                 return -EINVAL;
2688
2689         ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
2690
2691         return ret;
2692 }
2693
2694 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
2695 {
2696         struct smu_context *smu = handle;
2697         int ret = 0;
2698
2699         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2700                 return -EOPNOTSUPP;
2701
2702         if (!smu->ppt_funcs->set_fan_speed_pwm)
2703                 return -EOPNOTSUPP;
2704
2705         if (speed == U32_MAX)
2706                 return -EINVAL;
2707
2708         ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
2709         if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2710                 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
2711                 smu->user_dpm_profile.fan_speed_pwm = speed;
2712
2713                 /* Override any custom RPM setting, as the two cannot co-exist */
2714                 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
2715                 smu->user_dpm_profile.fan_speed_rpm = 0;
2716         }
2717
2718         return ret;
2719 }
2720
2721 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
2722 {
2723         struct smu_context *smu = handle;
2724         int ret = 0;
2725
2726         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2727                 return -EOPNOTSUPP;
2728
2729         if (!smu->ppt_funcs->get_fan_speed_rpm)
2730                 return -EOPNOTSUPP;
2731
2732         if (!speed)
2733                 return -EINVAL;
2734
2735         ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2736
2737         return ret;
2738 }
2739
2740 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
2741 {
2742         struct smu_context *smu = handle;
2743
2744         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2745                 return -EOPNOTSUPP;
2746
2747         return smu_set_min_dcef_deep_sleep(smu, clk);
2748 }
2749
2750 static int smu_get_clock_by_type_with_latency(void *handle,
2751                                               enum amd_pp_clock_type type,
2752                                               struct pp_clock_levels_with_latency *clocks)
2753 {
2754         struct smu_context *smu = handle;
2755         enum smu_clk_type clk_type;
2756         int ret = 0;
2757
2758         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2759                 return -EOPNOTSUPP;
2760
2761         if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2762                 switch (type) {
2763                 case amd_pp_sys_clock:
2764                         clk_type = SMU_GFXCLK;
2765                         break;
2766                 case amd_pp_mem_clock:
2767                         clk_type = SMU_MCLK;
2768                         break;
2769                 case amd_pp_dcef_clock:
2770                         clk_type = SMU_DCEFCLK;
2771                         break;
2772                 case amd_pp_disp_clock:
2773                         clk_type = SMU_DISPCLK;
2774                         break;
2775                 default:
2776                         dev_err(smu->adev->dev, "Invalid clock type!\n");
2777                         return -EINVAL;
2778                 }
2779
2780                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2781         }
2782
2783         return ret;
2784 }
2785
2786 static int smu_display_clock_voltage_request(void *handle,
2787                                              struct pp_display_clock_request *clock_req)
2788 {
2789         struct smu_context *smu = handle;
2790         int ret = 0;
2791
2792         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2793                 return -EOPNOTSUPP;
2794
2795         if (smu->ppt_funcs->display_clock_voltage_request)
2796                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2797
2798         return ret;
2799 }
2800
2802 static int smu_display_disable_memory_clock_switch(void *handle,
2803                                                    bool disable_memory_clock_switch)
2804 {
2805         struct smu_context *smu = handle;
2806         int ret = -EINVAL;
2807
2808         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2809                 return -EOPNOTSUPP;
2810
2811         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2812                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2813
2814         return ret;
2815 }
2816
2817 static int smu_set_xgmi_pstate(void *handle,
2818                                uint32_t pstate)
2819 {
2820         struct smu_context *smu = handle;
2821         int ret = 0;
2822
2823         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2824                 return -EOPNOTSUPP;
2825
2826         if (smu->ppt_funcs->set_xgmi_pstate)
2827                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2828
2829         if (ret)
2830                 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2831
2832         return ret;
2833 }
2834
2835 static int smu_get_baco_capability(void *handle, bool *cap)
2836 {
2837         struct smu_context *smu = handle;
2838
2839         *cap = false;
2840
2841         if (!smu->pm_enabled)
2842                 return 0;
2843
2844         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2845                 *cap = smu->ppt_funcs->baco_is_support(smu);
2846
2847         return 0;
2848 }
2849
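     /*
      * Enter (state == 1) or exit (state == 0) BACO, the "Bus Active,
      * Chip Off" low power state.
      */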
2850 static int smu_baco_set_state(void *handle, int state)
2851 {
2852         struct smu_context *smu = handle;
2853         int ret = 0;
2854
2855         if (!smu->pm_enabled)
2856                 return -EOPNOTSUPP;
2857
2858         if (state == 0) {
2859                 if (smu->ppt_funcs->baco_exit)
2860                         ret = smu->ppt_funcs->baco_exit(smu);
2861         } else if (state == 1) {
2862                 if (smu->ppt_funcs->baco_enter)
2863                         ret = smu->ppt_funcs->baco_enter(smu);
2864         } else {
2865                 return -EINVAL;
2866         }
2867
2868         if (ret)
2869                 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2870                                 (state)?"enter":"exit");
2871
2872         return ret;
2873 }
2874
2875 bool smu_mode1_reset_is_support(struct smu_context *smu)
2876 {
2877         bool ret = false;
2878
2879         if (!smu->pm_enabled)
2880                 return false;
2881
2882         if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2883                 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2884
2885         return ret;
2886 }
2887
2888 bool smu_mode2_reset_is_support(struct smu_context *smu)
2889 {
2890         bool ret = false;
2891
2892         if (!smu->pm_enabled)
2893                 return false;
2894
2895         if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2896                 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2897
2898         return ret;
2899 }
2900
2901 int smu_mode1_reset(struct smu_context *smu)
2902 {
2903         int ret = 0;
2904
2905         if (!smu->pm_enabled)
2906                 return -EOPNOTSUPP;
2907
2908         if (smu->ppt_funcs->mode1_reset)
2909                 ret = smu->ppt_funcs->mode1_reset(smu);
2910
2911         return ret;
2912 }
2913
2914 static int smu_mode2_reset(void *handle)
2915 {
2916         struct smu_context *smu = handle;
2917         int ret = 0;
2918
2919         if (!smu->pm_enabled)
2920                 return -EOPNOTSUPP;
2921
2922         if (smu->ppt_funcs->mode2_reset)
2923                 ret = smu->ppt_funcs->mode2_reset(smu);
2924
2925         if (ret)
2926                 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2927
2928         return ret;
2929 }
2930
2931 static int smu_enable_gfx_features(void *handle)
2932 {
2933         struct smu_context *smu = handle;
2934         int ret = 0;
2935
2936         if (!smu->pm_enabled)
2937                 return -EOPNOTSUPP;
2938
2939         if (smu->ppt_funcs->enable_gfx_features)
2940                 ret = smu->ppt_funcs->enable_gfx_features(smu);
2941
2942         if (ret)
2943                 dev_err(smu->adev->dev, "enable gfx features failed!\n");
2944
2945         return ret;
2946 }
2947
2948 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
2949                                                 struct pp_smu_nv_clock_table *max_clocks)
2950 {
2951         struct smu_context *smu = handle;
2952         int ret = 0;
2953
2954         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2955                 return -EOPNOTSUPP;
2956
2957         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2958                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2959
2960         return ret;
2961 }
2962
2963 static int smu_get_uclk_dpm_states(void *handle,
2964                                    unsigned int *clock_values_in_khz,
2965                                    unsigned int *num_states)
2966 {
2967         struct smu_context *smu = handle;
2968         int ret = 0;
2969
2970         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2971                 return -EOPNOTSUPP;
2972
2973         if (smu->ppt_funcs->get_uclk_dpm_states)
2974                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2975
2976         return ret;
2977 }
2978
2979 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
2980 {
2981         struct smu_context *smu = handle;
2982         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2983
2984         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2985                 return -EOPNOTSUPP;
2986
2987         if (smu->ppt_funcs->get_current_power_state)
2988                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2989
2990         return pm_state;
2991 }
2992
2993 static int smu_get_dpm_clock_table(void *handle,
2994                                    struct dpm_clocks *clock_table)
2995 {
2996         struct smu_context *smu = handle;
2997         int ret = 0;
2998
2999         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3000                 return -EOPNOTSUPP;
3001
3002         if (smu->ppt_funcs->get_dpm_clock_table)
3003                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3004
3005         return ret;
3006 }
3007
3008 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3009 {
3010         struct smu_context *smu = handle;
3011
3012         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3013                 return -EOPNOTSUPP;
3014
3015         if (!smu->ppt_funcs->get_gpu_metrics)
3016                 return -EOPNOTSUPP;
3017
3018         return smu->ppt_funcs->get_gpu_metrics(smu, table);
3019 }
3020
3021 static int smu_enable_mgpu_fan_boost(void *handle)
3022 {
3023         struct smu_context *smu = handle;
3024         int ret = 0;
3025
3026         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3027                 return -EOPNOTSUPP;
3028
3029         if (smu->ppt_funcs->enable_mgpu_fan_boost)
3030                 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3031
3032         return ret;
3033 }
3034
3035 static int smu_gfx_state_change_set(void *handle,
3036                                     uint32_t state)
3037 {
3038         struct smu_context *smu = handle;
3039         int ret = 0;
3040
3041         if (smu->ppt_funcs->gfx_state_change_set)
3042                 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3043
3044         return ret;
3045 }
3046
3047 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3048 {
3049         int ret = 0;
3050
3051         if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3052                 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3053
3054         return ret;
3055 }
3056
3057 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3058 {
3059         int ret = -EOPNOTSUPP;
3060
3061         if (smu->ppt_funcs && smu->ppt_funcs->get_ecc_info)
3062                 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3063
3064         return ret;
3065 }
3068
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (!addr || !size)
                return -EINVAL;

        *addr = NULL;
        *size = 0;
        if (memory_pool->bo) {
                *addr = memory_pool->cpu_addr;
                *size = memory_pool->size;
        }

        return 0;
}

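/*
 * Dispatch table through which the rest of amdgpu (sysfs handlers, the
 * core driver and the display core) reaches the SW SMU implementation.
 * Callers go through the powerplay handle, e.g. (illustrative only):
 *
 *      const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 *
 *      if (pp_funcs->get_gpu_metrics)
 *              size = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
 *                                               &table);
 */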
static const struct amd_pm_funcs swsmu_pm_funcs = {
        /* export for sysfs */
        .set_fan_control_mode    = smu_set_fan_control_mode,
        .get_fan_control_mode    = smu_get_fan_control_mode,
        .set_fan_speed_pwm       = smu_set_fan_speed_pwm,
        .get_fan_speed_pwm       = smu_get_fan_speed_pwm,
        .force_clock_level       = smu_force_ppclk_levels,
        .print_clock_levels      = smu_print_ppclk_levels,
        .emit_clock_levels       = smu_emit_ppclk_levels,
        .force_performance_level = smu_force_performance_level,
        .read_sensor             = smu_read_sensor,
        .get_apu_thermal_limit   = smu_get_apu_thermal_limit,
        .set_apu_thermal_limit   = smu_set_apu_thermal_limit,
        .get_performance_level   = smu_get_performance_level,
        .get_current_power_state = smu_get_current_power_state,
        .get_fan_speed_rpm       = smu_get_fan_speed_rpm,
        .set_fan_speed_rpm       = smu_set_fan_speed_rpm,
        .get_pp_num_states       = smu_get_power_num_states,
        .get_pp_table            = smu_sys_get_pp_table,
        .set_pp_table            = smu_sys_set_pp_table,
        .switch_power_profile    = smu_switch_power_profile,
        /* export to amdgpu */
        .dispatch_tasks          = smu_handle_dpm_task,
        .load_firmware           = smu_load_microcode,
        .set_powergating_by_smu  = smu_dpm_set_power_gate,
        .set_power_limit         = smu_set_power_limit,
        .get_power_limit         = smu_get_power_limit,
        .get_power_profile_mode  = smu_get_power_profile_mode,
        .set_power_profile_mode  = smu_set_power_profile_mode,
        .odn_edit_dpm_table      = smu_od_edit_dpm_table,
        .set_mp1_state           = smu_set_mp1_state,
        .gfx_state_change_set    = smu_gfx_state_change_set,
        /* export to DC */
        .get_sclk                            = smu_get_sclk,
        .get_mclk                            = smu_get_mclk,
        .display_configuration_change        = smu_display_configuration_change,
        .get_clock_by_type_with_latency      = smu_get_clock_by_type_with_latency,
        .display_clock_voltage_request       = smu_display_clock_voltage_request,
        .enable_mgpu_fan_boost               = smu_enable_mgpu_fan_boost,
        .set_active_display_count            = smu_set_display_count,
        .set_min_deep_sleep_dcefclk          = smu_set_deep_sleep_dcefclk,
        .get_asic_baco_capability            = smu_get_baco_capability,
        .set_asic_baco_state                 = smu_baco_set_state,
        .get_ppfeature_status                = smu_sys_get_pp_feature_mask,
        .set_ppfeature_status                = smu_sys_set_pp_feature_mask,
        .asic_reset_mode_2                   = smu_mode2_reset,
        .asic_reset_enable_gfx_features      = smu_enable_gfx_features,
        .set_df_cstate                       = smu_set_df_cstate,
        .set_xgmi_pstate                     = smu_set_xgmi_pstate,
        .get_gpu_metrics                     = smu_sys_get_gpu_metrics,
        .set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
        .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
        .get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
        .get_uclk_dpm_states                 = smu_get_uclk_dpm_states,
        .get_dpm_clock_table                 = smu_get_dpm_clock_table,
        .get_smu_prv_buf_details             = smu_get_prv_buffer_details,
};

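/*
 * Waits for the SMU to signal @event (qualified by @event_arg);
 * returns -EINVAL when the ASIC does not implement event waiting.
 */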
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
                       uint64_t event_arg)
{
        int ret = -EINVAL;

        if (smu->ppt_funcs->wait_for_event)
                ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

        return ret;
}

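/*
 * Copies the current Smart Trace Buffer (STB) contents into @buf,
 * which must be exactly stb_buf_size bytes long.
 */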
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
        if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
                return -EOPNOTSUPP;

        /* Confirm the buffer allocated is of correct size */
        if (size != smu->stb_context.stb_buf_size)
                return -EINVAL;

        /*
         * No need to lock the smu mutex, as we access the STB directly
         * through MMIO and do not go through the SMU messaging route
         * (for now at least). For register access we rely on the
         * implementation's internal locking.
         */
        return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
        struct amdgpu_device *adev = filp->f_inode->i_private;
        struct smu_context *smu = adev->powerplay.pp_handle;
        unsigned char *buf;
        int r;

        buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
        if (r)
                goto out;

        filp->private_data = buf;

        return 0;

out:
        kvfree(buf);
        return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = filp->f_inode->i_private;
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!filp->private_data)
                return -EINVAL;

        return simple_read_from_buffer(buf,
                                       size,
                                       pos, filp->private_data,
                                       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
        kvfree(filp->private_data);
        filp->private_data = NULL;

        return 0;
}

/*
 * We have to define not only the read method but also open and
 * release, because .read takes at most PAGE_SIZE of data per call
 * and so is invoked multiple times. We allocate the STB buffer in
 * .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = smu_stb_debugfs_open,
        .read = smu_stb_debugfs_read,
        .release = smu_stb_debugfs_release,
        .llseek = default_llseek,
};

#endif

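/*
 * Creates the "amdgpu_smu_stb_dump" debugfs file when the ASIC exposes
 * an STB. The trace buffer can then be captured from userspace, e.g.
 * (illustrative path, with <minor> being the DRM minor number):
 *
 *      cat /sys/kernel/debug/dri/<minor>/amdgpu_smu_stb_dump > stb.bin
 */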
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!smu || !smu->stb_context.stb_buf_size)
                return;

        debugfs_create_file_size("amdgpu_smu_stb_dump",
                                 S_IRUSR,
                                 adev_to_drm(adev)->primary->debugfs_root,
                                 adev,
                                 &smu_stb_debugfs_fops,
                                 smu->stb_context.stb_buf_size);
#endif
}

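/*
 * Reports the number of retired HBM pages to the SMU, keeping the
 * firmware's view in sync with the RAS bad-page bookkeeping.
 */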
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
        int ret = 0;

        if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
                ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

        return ret;
}

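/*
 * Forwards the bad-HBM-channel flag to the SMU on ASICs that support
 * it, likewise on behalf of the RAS code.
 */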
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
        int ret = 0;

        if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
                ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

        return ret;
}