/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
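
/*
 * Push the current DC display configuration down to powerplay. Note that
 * pplib expects clocks in 10 kHz units, so the *_khz values coming from DC
 * are divided by 10 before being stored in adev->pm.pm_display_cfg.
 */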
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
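
/*
 * Fallback clock tables, reported when pplib cannot supply clock levels.
 * All values are in kHz.
 */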
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}
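
/* Map a DM/DC clock type onto the corresponding pplib clock type. */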
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}
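
/* Translate a pplib DAL power level into the matching DM DPM state. */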
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
		enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}
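
/*
 * The three helpers below copy clock levels reported by pplib into the DM
 * structures, clamping the level count to DM_PP_MAX_CLOCK_LEVELS.
 */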
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}
static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}
static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
				pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}
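
/*
 * Query clock levels from pplib, falling back to the default tables on
 * failure, then trim any boosted levels that exceed the validation clocks.
 */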
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate from 10 kHz units to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock, which means the previous one is the
				 * highest non-boosted level.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}
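
/*
 * The two getters below return pplib clock levels annotated with latency or
 * voltage. Unlike dm_pp_get_clock_levels_by_type() there is no default
 * fallback; a pplib error simply fails the call.
 */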
bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
		ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clks);
		if (ret)
			return false;
	}

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}
bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
		ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clk_info);
		if (ret)
			return false;
	}

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}
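
/*
 * Forward watermark/clock-range updates to pplib. Returns true only when the
 * pplib callback is available and succeeds.
 */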
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	/*
	 * Limit this watermark setting to Polaris for now.
	 * TODO: expand this to other ASICs.
	 */
	if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM)
	    && pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) {
		if (!pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
						(void *)wm_with_clock_ranges))
			return true;
	}

	return false;
}
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
			adev->powerplay.pp_handle,
			&pp_clock_request);
	if (ret)
		return false;
	return true;
}
bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};
	int ret = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
		ret = adev->powerplay.pp_funcs->get_current_clocks(
			adev->powerplay.pp_handle,
			&pp_clk_info);
	else
		return false;
	if (ret)
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}
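
/*
 * pp_smu callbacks for Raven (DCN 1.x). These translate the pp_smu watermark
 * sets into the SOC15 watermark layout and forward the remaining requests
 * straight to the pplib function table.
 */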
static void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
				&wm_with_clock_ranges);
}
static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
		pp_funcs->notify_smu_enable_pwe(pp_handle);
}
static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_active_display_count)
		return;

	pp_funcs->set_active_display_count(pp_handle, count);
}
static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
		return;

	pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
}
static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
}
static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
		return;

	pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
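
/*
 * pp_smu callbacks for Navi (DCN 2.0). Unlike the Raven variants, these
 * report a pp_smu_status so DC can tell an unsupported callback apart from
 * a failed SMU request.
 */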
static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_active_display_count)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* Returns 0 on success (or when smu.ppt_funcs->set_display_count is NULL); 1 on failure. */
	if (pp_funcs->set_active_display_count(pp_handle, count))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* Returns 0 on success (or when smu.ppt_funcs->set_deep_sleep_dcefclk is NULL); 1 on failure. */
	if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_display_clock_request clock_req;

	if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/*
	 * Returns 0 on success (or when
	 * smu.ppt_funcs->display_clock_voltage_request is NULL); 1 on failure.
	 */
	if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_display_clock_request clock_req;

	if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/*
	 * Returns 0 on success (or when
	 * smu.ppt_funcs->display_clock_voltage_request is NULL); 1 on failure.
	 */
	if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status pp_nv_set_pstate_handshake_support(
	struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
		if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
				!pstate_handshake_supported))
			return PP_SMU_RESULT_FAIL;
	}

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_display_clock_request clock_req;

	if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
		return PP_SMU_RESULT_UNSUPPORTED;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/*
	 * Returns 0 on success (or when
	 * smu.ppt_funcs->display_clock_voltage_request is NULL); 1 on failure.
	 */
	if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}
static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!pp_funcs->get_uclk_dpm_states(pp_handle,
					   clock_values_in_khz,
					   num_states))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}
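
/*
 * pp_smu callbacks for Renoir (DCN 2.1), which exposes the full DPM clock
 * table to DC instead of individual clock queries.
 */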
static enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}
static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);

	return PP_SMU_RESULT_OK;
}
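
/*
 * Populate the pp_smu function table with the callbacks matching the DCN
 * version of the current ASIC.
 */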
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: set_pme_wa_enable keeps a 4k@60Hz display from lighting up. */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver. */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver. */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;
	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}