/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "amdgpu_smu.h"

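/*
 * Translate DC's display configuration into the shared pm_display_cfg
 * structure and hand it to either the legacy powerplay backend or the
 * SW SMU, then let the power code recompute its clocks. DC passes
 * clocks in kHz while powerplay stores them in 10 kHz units, hence the
 * divisions by 10 below.
 */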
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	unsigned int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz / 10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz / 10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz / 10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz / 10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else
			smu_display_configuration_change(smu,
					&adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}

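/*
 * Fallback clock tables (in kHz) used when the backend cannot report
 * real DPM levels.
 */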
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

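/*
 * Map DC clock types onto SW SMU clock types through a designated
 * initializer table.
 */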
static enum smu_clk_type dc_to_smu_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
	[dcclk] = (smuclk)

	static int dc_clk_type_map[] = {
		DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK,	SMU_DISPCLK),
		DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK,	SMU_GFXCLK),
		DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK,	SMU_MCLK),
		DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK,	SMU_DCEFCLK),
		DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK,	SMU_SOCCLK),
	};

	return dc_clk_type_map[dm_pp_clk_type];
}

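/* Map DC clock types onto the legacy powerplay clock types. */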
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

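/*
 * The pp_to_dc_clock_levels*() helpers copy backend clock tables into
 * DC's structures, clamping the level count to DM_PP_MAX_CLOCK_LEVELS
 * and logging each level.
 */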
static void pp_to_dc_clock_levels(
			const struct amd_pp_clocks *pp_clks,
			struct dm_pp_clock_levels *dc_clks,
			enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);
		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
			const struct pp_clock_levels_with_latency *pp_clks,
			struct dm_pp_clock_levels_with_latency *clk_level_info,
			enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);
		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
			const struct pp_clock_levels_with_voltage *pp_clks,
			struct dm_pp_clock_levels_with_voltage *clk_level_info,
			enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);
		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
			 pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

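/*
 * Query clock levels from powerplay or the SW SMU, fall back to the
 * default tables on error, then trim boosted engine/memory levels that
 * exceed the reported validation clocks.
 */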
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
		if (smu_get_clock_by_type(&adev->smu,
					  dc_to_smu_clock_type(clk_type),
					  &pp_clks)) {
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
		if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

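/*
 * The _with_latency and _with_voltage variants forward to whichever
 * backend implements the query and fail if it reports an error.
 */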
bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
		ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clks);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
		if (smu_get_clock_by_type_with_latency(&adev->smu,
						       dc_to_smu_clock_type(clk_type),
						       &pp_clks))
			return false;
	}

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
		ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clk_info);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
		if (smu_get_clock_by_type_with_voltage(&adev->smu,
						       dc_to_pp_clock_type(clk_type),
						       &pp_clk_info))
			return false;
	}

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
			adev->powerplay.pp_handle,
			&pp_clock_request);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->display_clock_voltage_request)
		ret = smu_display_clock_voltage_request(&adev->smu,
							&pp_clock_request);
	if (ret)
		return false;

	return true;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};
	int ret = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
		ret = adev->powerplay.pp_funcs->get_current_clocks(
			adev->powerplay.pp_handle,
			&pp_clk_info);
	else if (adev->smu.funcs)
		ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
	if (ret)
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

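/*
 * pp_smu handlers for Raven (DCN 1.x): translate DC's watermark range
 * sets (MHz) into the SOC15 watermark structure (kHz) and pass the
 * result to whichever backend is present. Watermark instances above 3
 * fall back to set A.
 */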
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
				&wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
		smu_set_watermarks_for_clock_ranges(&adev->smu,
				&wm_with_clock_ranges);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
		pp_funcs->notify_smu_enable_pwe(pp_handle);
	else if (adev->smu.funcs)
		smu_notify_smu_enable_pwe(&adev->smu);
}

void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_active_display_count)
		return;

	pp_funcs->set_active_display_count(pp_handle, count);
}

void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
		return;

	pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
}

void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
}

void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
		return;

	pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}

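/*
 * pp_smu handlers for Navi (DCN 2.0): these go through the SW SMU
 * interface only and report a pp_smu_status result to DC.
 */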
enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
	 * 1: fail
	 */
	if (smu_set_watermarks_for_clock_ranges(&adev->smu,
			&wm_with_clock_ranges))
		return PP_SMU_RESULT_UNSUPPORTED;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */
	if (smu_set_azalia_d3_pme(smu))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */
	if (smu_set_display_count(smu, count))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL; 1: fail */
	if (smu_set_deep_sleep_dcefclk(smu, mhz))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->funcs->get_max_sustainable_clocks_by_dc)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_uclk_dpm_states)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
			clock_values_in_khz, num_states))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

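/*
 * Fill DC's pp_smu function table according to the DCN version being
 * driven; unsupported versions only get an error message.
 */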
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* todo: set_pme_wa_enable keeps a 4k@60Hz display from lighting up */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* todo: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		break;
#endif
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}