drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"

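/*
 * Copy the DC display configuration into adev->pm.pm_display_cfg (clock
 * values are converted from kHz into the 10 kHz units powerplay expects),
 * notify powerplay via display_configuration_change(), and recompute the
 * power management clocks.
 */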
bool dm_pp_apply_display_requirements(
                const struct dc_context *ctx,
                const struct dm_pp_display_configuration *pp_display_cfg)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int i;

        if (adev->pm.dpm_enabled) {

                memset(&adev->pm.pm_display_cfg, 0,
                                sizeof(adev->pm.pm_display_cfg));

                adev->pm.pm_display_cfg.cpu_cc6_disable =
                        pp_display_cfg->cpu_cc6_disable;

                adev->pm.pm_display_cfg.cpu_pstate_disable =
                        pp_display_cfg->cpu_pstate_disable;

                adev->pm.pm_display_cfg.cpu_pstate_separation_time =
                        pp_display_cfg->cpu_pstate_separation_time;

                adev->pm.pm_display_cfg.nb_pstate_switch_disable =
                        pp_display_cfg->nb_pstate_switch_disable;

                adev->pm.pm_display_cfg.num_display =
                                pp_display_cfg->display_count;
                adev->pm.pm_display_cfg.num_path_including_non_display =
                                pp_display_cfg->display_count;

                adev->pm.pm_display_cfg.min_core_set_clock =
                                pp_display_cfg->min_engine_clock_khz/10;
                adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
                                pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
                adev->pm.pm_display_cfg.min_mem_set_clock =
                                pp_display_cfg->min_memory_clock_khz/10;

                adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
                                pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
                adev->pm.pm_display_cfg.min_dcef_set_clk =
                                pp_display_cfg->min_dcfclock_khz/10;

                adev->pm.pm_display_cfg.multi_monitor_in_sync =
                                pp_display_cfg->all_displays_in_sync;
                adev->pm.pm_display_cfg.min_vblank_time =
                                pp_display_cfg->avail_mclk_switch_time_us;

                adev->pm.pm_display_cfg.display_clk =
                                pp_display_cfg->disp_clk_khz/10;

                adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
                                pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

                adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
                adev->pm.pm_display_cfg.line_time_in_us =
                                pp_display_cfg->line_time_in_us;

                adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
                adev->pm.pm_display_cfg.crossfire_display_index = -1;
                adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

                for (i = 0; i < pp_display_cfg->display_count; i++) {
                        const struct dm_pp_single_disp_config *dc_cfg =
                                                &pp_display_cfg->disp_configs[i];
                        adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
                }

                if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
                        adev->powerplay.pp_funcs->display_configuration_change(
                                adev->powerplay.pp_handle,
                                &adev->pm.pm_display_cfg);

                amdgpu_pm_compute_clocks(adev);
        }

        return true;
}

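/* Fallback clock tables used when the pplib query for clock levels fails. */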
static void get_default_clock_levels(
                enum dm_pp_clock_type clk_type,
                struct dm_pp_clock_levels *clks)
{
        uint32_t disp_clks_in_khz[6] = {
                        300000, 400000, 496560, 626090, 685720, 757900 };
        uint32_t sclks_in_khz[6] = {
                        300000, 360000, 423530, 514290, 626090, 720000 };
        uint32_t mclks_in_khz[2] = { 333000, 800000 };

        switch (clk_type) {
        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
                clks->num_levels = 6;
                memmove(clks->clocks_in_khz, disp_clks_in_khz,
                                sizeof(disp_clks_in_khz));
                break;
        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
                clks->num_levels = 6;
                memmove(clks->clocks_in_khz, sclks_in_khz,
                                sizeof(sclks_in_khz));
                break;
        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
                clks->num_levels = 2;
                memmove(clks->clocks_in_khz, mclks_in_khz,
                                sizeof(mclks_in_khz));
                break;
        default:
                clks->num_levels = 0;
                break;
        }
}

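/* Map a DM clock type onto the corresponding powerplay (amd_pp) clock type. */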
static enum amd_pp_clock_type dc_to_pp_clock_type(
                enum dm_pp_clock_type dm_pp_clk_type)
{
        enum amd_pp_clock_type amd_pp_clk_type = 0;

        switch (dm_pp_clk_type) {
        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
                amd_pp_clk_type = amd_pp_disp_clock;
                break;
        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
                amd_pp_clk_type = amd_pp_sys_clock;
                break;
        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
                amd_pp_clk_type = amd_pp_mem_clock;
                break;
        case DM_PP_CLOCK_TYPE_DCEFCLK:
                amd_pp_clk_type = amd_pp_dcef_clock;
                break;
        case DM_PP_CLOCK_TYPE_DCFCLK:
                amd_pp_clk_type = amd_pp_dcf_clock;
                break;
        case DM_PP_CLOCK_TYPE_PIXELCLK:
                amd_pp_clk_type = amd_pp_pixel_clock;
                break;
        case DM_PP_CLOCK_TYPE_FCLK:
                amd_pp_clk_type = amd_pp_f_clock;
                break;
        case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
                amd_pp_clk_type = amd_pp_phy_clock;
                break;
        case DM_PP_CLOCK_TYPE_DPPCLK:
                amd_pp_clk_type = amd_pp_dpp_clock;
                break;
        default:
                DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
                                dm_pp_clk_type);
                break;
        }

        return amd_pp_clk_type;
}

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
                        enum PP_DAL_POWERLEVEL max_clocks_state)
{
        switch (max_clocks_state) {
        case PP_DAL_POWERLEVEL_0:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
        case PP_DAL_POWERLEVEL_1:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
        case PP_DAL_POWERLEVEL_2:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
        case PP_DAL_POWERLEVEL_3:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
        case PP_DAL_POWERLEVEL_4:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
        case PP_DAL_POWERLEVEL_5:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
        case PP_DAL_POWERLEVEL_6:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
        case PP_DAL_POWERLEVEL_7:
                return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
        default:
                DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
                                max_clocks_state);
                return DM_PP_CLOCKS_STATE_INVALID;
        }
}

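/*
 * The three helpers below copy powerplay clock level tables into their DM
 * representations, clamping the number of levels to DM_PP_MAX_CLOCK_LEVELS.
 */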
static void pp_to_dc_clock_levels(
                const struct amd_pp_clocks *pp_clks,
                struct dm_pp_clock_levels *dc_clks,
                enum dm_pp_clock_type dc_clk_type)
{
        uint32_t i;

        if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
                                pp_clks->count,
                                DM_PP_MAX_CLOCK_LEVELS);

                dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
        } else
                dc_clks->num_levels = pp_clks->count;

        DRM_INFO("DM_PPLIB: values for %s clock\n",
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

        for (i = 0; i < dc_clks->num_levels; i++) {
                DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
                dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
        }
}

static void pp_to_dc_clock_levels_with_latency(
                const struct pp_clock_levels_with_latency *pp_clks,
                struct dm_pp_clock_levels_with_latency *clk_level_info,
                enum dm_pp_clock_type dc_clk_type)
{
        uint32_t i;

        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
                                pp_clks->num_levels,
                                DM_PP_MAX_CLOCK_LEVELS);

                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
        } else
                clk_level_info->num_levels = pp_clks->num_levels;

        DRM_DEBUG("DM_PPLIB: values for %s clock\n",
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

        for (i = 0; i < clk_level_info->num_levels; i++) {
                DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
                clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
        }
}

static void pp_to_dc_clock_levels_with_voltage(
                const struct pp_clock_levels_with_voltage *pp_clks,
                struct dm_pp_clock_levels_with_voltage *clk_level_info,
                enum dm_pp_clock_type dc_clk_type)
{
        uint32_t i;

        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
                                pp_clks->num_levels,
                                DM_PP_MAX_CLOCK_LEVELS);

                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
        } else
                clk_level_info->num_levels = pp_clks->num_levels;

        DRM_INFO("DM_PPLIB: values for %s clock\n",
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

        for (i = 0; i < clk_level_info->num_levels; i++) {
                DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
                         pp_clks->data[i].voltage_in_mv);
                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
                clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
        }
}

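/*
 * Query pplib for the clock levels of the requested type, falling back to
 * the default tables on error.  For engine and memory clocks, levels above
 * the reported validation clocks are treated as boosted and trimmed off.
 */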
bool dm_pp_get_clock_levels_by_type(
                const struct dc_context *ctx,
                enum dm_pp_clock_type clk_type,
                struct dm_pp_clock_levels *dc_clks)
{
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        struct amd_pp_clocks pp_clks = { 0 };
        struct amd_pp_simple_clock_info validation_clks = { 0 };
        uint32_t i;

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
                if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
                        dc_to_pp_clock_type(clk_type), &pp_clks)) {
                        /* Error in pplib. Provide default values. */
                        get_default_clock_levels(clk_type, dc_clks);
                        return true;
                }
        }

        pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
                if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
                                                pp_handle, &validation_clks)) {
                        /* Error in pplib. Provide default values. */
                        DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
                        validation_clks.engine_max_clock = 72000;
                        validation_clks.memory_max_clock = 80000;
                        validation_clks.level = 0;
                }
        }

        DRM_INFO("DM_PPLIB: Validation clocks:\n");
        DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
                        validation_clks.engine_max_clock);
        DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
                        validation_clks.memory_max_clock);
        DRM_INFO("DM_PPLIB:    level           : %d\n",
                        validation_clks.level);

        /* Convert from units of 10 kHz to kHz. */
        validation_clks.engine_max_clock *= 10;
        validation_clks.memory_max_clock *= 10;

        /* Determine the highest non-boosted level from the validation clocks */
        if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
                for (i = 0; i < dc_clks->num_levels; i++) {
                        if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
                                /* This clock is higher than the validation
                                 * clock. That means the previous one is the
                                 * highest non-boosted one. */
                                DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
                                                dc_clks->num_levels, i);
                                dc_clks->num_levels = i > 0 ? i : 1;
                                break;
                        }
                }
        } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
                for (i = 0; i < dc_clks->num_levels; i++) {
                        if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
                                DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
                                                dc_clks->num_levels, i);
                                dc_clks->num_levels = i > 0 ? i : 1;
                                break;
                        }
                }
        }

        return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
        const struct dc_context *ctx,
        enum dm_pp_clock_type clk_type,
        struct dm_pp_clock_levels_with_latency *clk_level_info)
{
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        struct pp_clock_levels_with_latency pp_clks = { 0 };
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;

        if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
                ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
                                                dc_to_pp_clock_type(clk_type),
                                                &pp_clks);
                if (ret)
                        return false;
        }

        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

        return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
        const struct dc_context *ctx,
        enum dm_pp_clock_type clk_type,
        struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        struct pp_clock_levels_with_voltage pp_clk_info = {0};
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;

        if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
                ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
                                                dc_to_pp_clock_type(clk_type),
                                                &pp_clk_info);
                if (ret)
                        return false;
        }

        pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

        return true;
}

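/*
 * Forward watermark clock ranges to powerplay.  For now this is only wired
 * up for Polaris-family ASICs; returns true only if the pplib call succeeds.
 */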
bool dm_pp_notify_wm_clock_changes(
        const struct dc_context *ctx,
        struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        /*
         * Limit this watermark setting for Polaris for now
         * TODO: expand this to other ASICs
         */
        if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM)
             && pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) {
                if (!pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
                                                (void *)wm_with_clock_ranges))
                        return true;
        }

        return false;
}

bool dm_pp_apply_power_level_change_request(
        const struct dc_context *ctx,
        struct dm_pp_power_level_change_request *level_change_req)
{
        /* TODO: to be implemented */
        return false;
}

bool dm_pp_apply_clock_for_voltage_request(
        const struct dc_context *ctx,
        struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
        struct amdgpu_device *adev = ctx->driver_context;
        struct pp_display_clock_request pp_clock_request = {0};
        int ret = 0;

        pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
        pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

        if (!pp_clock_request.clock_type)
                return false;

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
                        adev->powerplay.pp_handle,
                        &pp_clock_request);
        if (ret)
                return false;
        return true;
}

bool dm_pp_get_static_clocks(
        const struct dc_context *ctx,
        struct dm_pp_static_clock_info *static_clk_info)
{
        struct amdgpu_device *adev = ctx->driver_context;
        struct amd_pp_clock_info pp_clk_info = {0};
        int ret = 0;

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
                ret = adev->powerplay.pp_funcs->get_current_clocks(
                        adev->powerplay.pp_handle,
                        &pp_clk_info);
        else
                return false;
        if (ret)
                return false;

        static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
        static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
        static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

        return true;
}

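/*
 * pp_smu hooks used for DCN 1.x (PP_SMU_VER_RV).  set_wm_ranges translates
 * the pp_smu watermark sets (in MHz) into the SoC15 DMIF/MCIF clock ranges
 * (in kHz) that pplib expects; the remaining hooks are thin wrappers around
 * the corresponding powerplay callbacks.
 */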
static void pp_rv_set_wm_ranges(struct pp_smu *pp,
                struct pp_smu_wm_range_sets *ranges)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
        struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
        struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
        int32_t i;

        wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
        wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

        for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
                if (ranges->reader_wm_sets[i].wm_inst > 3)
                        wm_dce_clocks[i].wm_set_id = WM_SET_A;
                else
                        wm_dce_clocks[i].wm_set_id =
                                        ranges->reader_wm_sets[i].wm_inst;
                wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
                                ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
                wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
                                ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
                wm_dce_clocks[i].wm_max_mem_clk_in_khz =
                                ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
                wm_dce_clocks[i].wm_min_mem_clk_in_khz =
                                ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
        }

        for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
                if (ranges->writer_wm_sets[i].wm_inst > 3)
                        wm_soc_clocks[i].wm_set_id = WM_SET_A;
                else
                        wm_soc_clocks[i].wm_set_id =
                                        ranges->writer_wm_sets[i].wm_inst;
                wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
                                ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
                wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
                                ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
                wm_soc_clocks[i].wm_max_mem_clk_in_khz =
                                ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
                wm_soc_clocks[i].wm_min_mem_clk_in_khz =
                                ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
        }

        if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
                pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
                                                           &wm_with_clock_ranges);
}

static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
                pp_funcs->notify_smu_enable_pwe(pp_handle);
}

static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_active_display_count)
                return;

        pp_funcs->set_active_display_count(pp_handle, count);
}

static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
                return;

        pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
}

static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
                return;

        pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
}

static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
                return;

        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}

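/*
 * pp_smu hooks used for DCN 2.0 (PP_SMU_VER_NV).  These return a
 * pp_smu_status so DC can tell an unsupported hook apart from a failure.
 */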
static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
                struct pp_smu_wm_range_sets *ranges)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
                pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_active_display_count)
                return PP_SMU_RESULT_UNSUPPORTED;

        /* 0: success, or smu.ppt_funcs->set_display_count is NULL; 1: failure */
        if (pp_funcs->set_active_display_count(pp_handle, count))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
                return PP_SMU_RESULT_UNSUPPORTED;

        /* 0: success, or smu.ppt_funcs->set_deep_sleep_dcefclk is NULL; 1: failure */
        if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
                struct pp_smu *pp, int mhz)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;

        if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;

        clock_req.clock_type = amd_pp_dcef_clock;
        clock_req.clock_freq_in_khz = mhz * 1000;

        /* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
         * 1: failure
         */
        if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;

        if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;

        clock_req.clock_type = amd_pp_mem_clock;
        clock_req.clock_freq_in_khz = mhz * 1000;

        /* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
         * 1: failure
         */
        if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_pstate_handshake_support(
        struct pp_smu *pp, bool pstate_handshake_supported)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
                if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
                                                                  !pstate_handshake_supported))
                        return PP_SMU_RESULT_FAIL;
        }

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
                enum pp_smu_nv_clock_id clock_id, int mhz)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;

        if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;

        switch (clock_id) {
        case PP_SMU_NV_DISPCLK:
                clock_req.clock_type = amd_pp_disp_clock;
                break;
        case PP_SMU_NV_PHYCLK:
                clock_req.clock_type = amd_pp_phy_clock;
                break;
        case PP_SMU_NV_PIXELCLK:
                clock_req.clock_type = amd_pp_pixel_clock;
                break;
        default:
                break;
        }
        clock_req.clock_freq_in_khz = mhz * 1000;

        /* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
         * 1: failure
         */
        if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
                struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
                return PP_SMU_RESULT_UNSUPPORTED;

        if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
                return PP_SMU_RESULT_OK;

        return PP_SMU_RESULT_FAIL;
}

static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
                unsigned int *clock_values_in_khz, unsigned int *num_states)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
                return PP_SMU_RESULT_UNSUPPORTED;

        if (!pp_funcs->get_uclk_dpm_states(pp_handle,
                                           clock_values_in_khz,
                                           num_states))
                return PP_SMU_RESULT_OK;

        return PP_SMU_RESULT_FAIL;
}

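/* pp_smu hooks used for DCN 2.1 (PP_SMU_VER_RN). */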
static enum pp_smu_status pp_rn_get_dpm_clock_table(
                struct pp_smu *pp, struct dpm_clocks *clock_table)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
                return PP_SMU_RESULT_UNSUPPORTED;

        if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
                return PP_SMU_RESULT_OK;

        return PP_SMU_RESULT_FAIL;
}

static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
                struct pp_smu_wm_range_sets *ranges)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
                pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);

        return PP_SMU_RESULT_OK;
}

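/*
 * Fill in the pp_smu function table with the hooks matching the DCN version
 * of the current ASIC.
 */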
void dm_pp_get_funcs(
                struct dc_context *ctx,
                struct pp_smu_funcs *funcs)
{
        switch (ctx->dce_version) {
        case DCN_VERSION_1_0:
        case DCN_VERSION_1_01:
                funcs->ctx.ver = PP_SMU_VER_RV;
                funcs->rv_funcs.pp_smu.dm = ctx;
                funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
                funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
                funcs->rv_funcs.set_display_count =
                                pp_rv_set_active_display_count;
                funcs->rv_funcs.set_min_deep_sleep_dcfclk =
                                pp_rv_set_min_deep_sleep_dcfclk;
                funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
                                pp_rv_set_hard_min_dcefclk_by_freq;
                funcs->rv_funcs.set_hard_min_fclk_by_freq =
                                pp_rv_set_hard_min_fclk_by_freq;
                break;
        case DCN_VERSION_2_0:
                funcs->ctx.ver = PP_SMU_VER_NV;
                funcs->nv_funcs.pp_smu.dm = ctx;
                funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
                funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
                                pp_nv_set_hard_min_dcefclk_by_freq;
                funcs->nv_funcs.set_min_deep_sleep_dcfclk =
                                pp_nv_set_min_deep_sleep_dcfclk;
                funcs->nv_funcs.set_voltage_by_freq =
                                pp_nv_set_voltage_by_freq;
                funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

                /* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */
                funcs->nv_funcs.set_pme_wa_enable = NULL;
                /* TODO: debug warning message */
                funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
                /* TODO: compare data with the Windows driver */
                funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
                /* TODO: compare data with the Windows driver */
                funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
                funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
                break;

        case DCN_VERSION_2_1:
                funcs->ctx.ver = PP_SMU_VER_RN;
                funcs->rn_funcs.pp_smu.dm = ctx;
                funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
                funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
                break;
        default:
                DRM_ERROR("smu version is not supported!\n");
                break;
        }
}