/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>

#include "dm_services.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"
#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"
#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
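
/*
 * Illustrative usage sketch (simplified; dm-side error handling and the
 * stream/plane construction steps are elided). All names below are public
 * entry points defined later in this file:
 *
 *	struct dc *dc = dc_create(&init_params);
 *
 *	dc_hardware_init(dc);
 *
 *	struct dc_state *context = dc_create_state(dc);
 *	// ...dm adds streams and planes to context...
 *	dc_commit_state(dc, context);
 *	dc_release_state(context);	// dc retains its own reference
 *
 *	dc_destroy(&dc);
 */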
/*******************************************************************************
 * Private functions
 ******************************************************************************/
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}
static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	uint32_t count = 0;
	uint32_t i;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}
static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}
static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}
/**
 * dc_stream_adjust_vmin_vmax:
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;
	bool ret = false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}
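
/*
 * Illustrative DRR example (simplified): for a 48-60 Hz panel whose base
 * timing runs at 60 Hz, dm would typically program
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *	adjust.v_total_min = stream->timing.v_total;
 *	adjust.v_total_max = stream->timing.v_total * 60 / 48;
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 *
 * Since refresh rate = pix_clk / (h_total * v_total), the larger
 * v_total_max lets the timing generator stretch frames down to the
 * 48 Hz floor while the screen is static.
 */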
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct crc_region tmp_win, *crc_win;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	/* crc window can't be null */
	if (!crc_window)
		return false;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		crc_win = &tmp_win;
		mux_mapping = &mapping_tmp;
		/* set crc window */
		tmp_win.x_start = crc_window->windowa_x_start;
		tmp_win.y_start = crc_window->windowa_y_start;
		tmp_win.x_end = crc_window->windowa_x_end;
		tmp_win.y_end = crc_window->windowa_y_end;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		mux_mapping = &mapping_tmp;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}
#endif
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
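
/*
 * Illustrative CRC capture sequence (simplified): passing a NULL crc_window
 * to dc_stream_configure_crc() selects the full frame, and continuous mode
 * re-arms CRC0 on every frame:
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true)) {
 *		// ...wait for at least one completed frame...
 *		dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb);
 *	}
 */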
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}
void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;
#endif

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}
static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		kfree(dc_ctx);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Initialise DIG link encoder resource tracking variables. */
	link_enc_cfg_init(dc, dc->current_state);

	return true;

fail:
	return false;
}
static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}
static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
					  struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;

			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
}
/*******************************************************************************
 * Public functions
 ******************************************************************************/
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}
void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on eDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
		return false;

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}
/*
 * Applies given context to HW and copies it into current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN)
	dc_allow_idle_optimizations(dc, false);
#endif

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release, pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
#endif
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	if (is_flip_pending_in_pipes(dc, context))
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}
static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
			GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

	kref_init(&context->refcount);

	return context;
}
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
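
/*
 * Illustrative refcount sketch (simplified): dc_state is shared between dm
 * and dc, so lifetime is tracked with a kref rather than explicit frees:
 *
 *	struct dc_state *context = dc_create_state(dc);	// refcount = 1
 *
 *	dc_retain_state(context);	// an additional user takes a reference
 *	dc_release_state(context);	// that user is done
 *	dc_release_state(context);	// last put: dc_state_free() runs
 */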
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

#endif
	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern) {
		overall_type = UPDATE_TYPE_FULL;
	}

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fallback to mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}
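
/*
 * Illustrative classification example (simplified; dc_stream_get_status()
 * is declared in dc_stream.h, and new_address stands in for a caller-built
 * struct dc_plane_address): a flip that only changes the surface address
 * is expected to come back as UPDATE_TYPE_FAST:
 *
 *	struct dc_flip_addrs flip = { .address = new_address };
 *	struct dc_surface_update srf = { .surface = plane, .flip_addr = &flip };
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, &srf, 1, NULL,
 *				dc_stream_get_status(stream));
 */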
static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2220 static void copy_surface_update_to_plane(
2221 struct dc_plane_state *surface,
2222 struct dc_surface_update *srf_update)
2224 if (srf_update->flip_addr) {
2225 surface->address = srf_update->flip_addr->address;
2226 surface->flip_immediate =
2227 srf_update->flip_addr->flip_immediate;
2228 surface->time.time_elapsed_in_us[surface->time.index] =
2229 srf_update->flip_addr->flip_timestamp_in_us -
2230 surface->time.prev_update_time_in_us;
2231 surface->time.prev_update_time_in_us =
2232 srf_update->flip_addr->flip_timestamp_in_us;
2233 surface->time.index++;
2234 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2235 surface->time.index = 0;
2237 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2240 if (srf_update->scaling_info) {
2241 surface->scaling_quality =
2242 srf_update->scaling_info->scaling_quality;
2244 srf_update->scaling_info->dst_rect;
2246 srf_update->scaling_info->src_rect;
2247 surface->clip_rect =
2248 srf_update->scaling_info->clip_rect;
2251 if (srf_update->plane_info) {
2252 surface->color_space =
2253 srf_update->plane_info->color_space;
2255 srf_update->plane_info->format;
2256 surface->plane_size =
2257 srf_update->plane_info->plane_size;
2259 srf_update->plane_info->rotation;
2260 surface->horizontal_mirror =
2261 srf_update->plane_info->horizontal_mirror;
2262 surface->stereo_format =
2263 srf_update->plane_info->stereo_format;
2264 surface->tiling_info =
2265 srf_update->plane_info->tiling_info;
2267 srf_update->plane_info->visible;
2268 surface->per_pixel_alpha =
2269 srf_update->plane_info->per_pixel_alpha;
2270 surface->global_alpha =
2271 srf_update->plane_info->global_alpha;
2272 surface->global_alpha_value =
2273 srf_update->plane_info->global_alpha_value;
2275 srf_update->plane_info->dcc;
2276 surface->layer_index =
2277 srf_update->plane_info->layer_index;
2280 if (srf_update->gamma &&
2281 (surface->gamma_correction !=
2282 srf_update->gamma)) {
2283 memcpy(&surface->gamma_correction->entries,
2284 &srf_update->gamma->entries,
2285 sizeof(struct dc_gamma_entries));
2286 surface->gamma_correction->is_identity =
2287 srf_update->gamma->is_identity;
2288 surface->gamma_correction->num_entries =
2289 srf_update->gamma->num_entries;
2290 surface->gamma_correction->type =
2291 srf_update->gamma->type;
2294 if (srf_update->in_transfer_func &&
2295 (surface->in_transfer_func !=
2296 srf_update->in_transfer_func)) {
2297 surface->in_transfer_func->sdr_ref_white_level =
2298 srf_update->in_transfer_func->sdr_ref_white_level;
2299 surface->in_transfer_func->tf =
2300 srf_update->in_transfer_func->tf;
2301 surface->in_transfer_func->type =
2302 srf_update->in_transfer_func->type;
2303 memcpy(&surface->in_transfer_func->tf_pts,
2304 &srf_update->in_transfer_func->tf_pts,
2305 sizeof(struct dc_transfer_func_distributed_points));
2308 if (srf_update->func_shaper &&
2309 (surface->in_shaper_func !=
2310 srf_update->func_shaper))
2311 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2312 sizeof(*surface->in_shaper_func));
2314 if (srf_update->lut3d_func &&
2315 (surface->lut3d_func !=
2316 srf_update->lut3d_func))
2317 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2318 sizeof(*surface->lut3d_func));
2320 if (srf_update->hdr_mult.value)
2322 srf_update->hdr_mult;
2324 if (srf_update->blend_tf &&
2325 (surface->blend_tf !=
2326 srf_update->blend_tf))
2327 memcpy(surface->blend_tf, srf_update->blend_tf,
2328 sizeof(*surface->blend_tf));
2330 if (srf_update->input_csc_color_matrix)
2331 surface->input_csc_color_matrix =
2332 *srf_update->input_csc_color_matrix;
2334 if (srf_update->coeff_reduction_factor)
2335 surface->coeff_reduction_factor =
2336 *srf_update->coeff_reduction_factor;
2338 if (srf_update->gamut_remap_matrix)
2339 surface->gamut_remap_matrix =
2340 *srf_update->gamut_remap_matrix;
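/*
 * Sketch (illustrative): the smallest update copy_surface_update_to_plane()
 * handles is a page flip, where only flip_addr is provided and every other
 * pointer stays NULL so the matching plane fields are left untouched.
 * `fb_gpu_addr` is a hypothetical framebuffer address.
 *
 *	struct dc_flip_addrs flip = { 0 };
 *
 *	flip.address.type = PLN_ADDR_TYPE_GRAPHICS;
 *	flip.address.grph.addr.quad_part = fb_gpu_addr;
 *	flip.flip_immediate = false;	// flip on vsync
 *
 *	struct dc_surface_update upd = { .surface = plane, .flip_addr = &flip };
 */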
2343 static void copy_stream_update_to_stream(struct dc *dc,
2344 struct dc_state *context,
2345 struct dc_stream_state *stream,
2346 struct dc_stream_update *update)
2348 struct dc_context *dc_ctx = dc->ctx;
2350 if (update == NULL || stream == NULL)
2353 if (update->src.height && update->src.width)
2354 stream->src = update->src;
2356 if (update->dst.height && update->dst.width)
2357 stream->dst = update->dst;
2359 if (update->out_transfer_func &&
2360 stream->out_transfer_func != update->out_transfer_func) {
2361 stream->out_transfer_func->sdr_ref_white_level =
2362 update->out_transfer_func->sdr_ref_white_level;
2363 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2364 stream->out_transfer_func->type =
2365 update->out_transfer_func->type;
2366 memcpy(&stream->out_transfer_func->tf_pts,
2367 &update->out_transfer_func->tf_pts,
2368 sizeof(struct dc_transfer_func_distributed_points));
2371 if (update->hdr_static_metadata)
2372 stream->hdr_static_metadata = *update->hdr_static_metadata;
2374 if (update->abm_level)
2375 stream->abm_level = *update->abm_level;
2377 if (update->periodic_interrupt0)
2378 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2380 if (update->periodic_interrupt1)
2381 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2383 if (update->gamut_remap)
2384 stream->gamut_remap_matrix = *update->gamut_remap;
2386 /* Note: this being updated after mode set is currently not a use case;
2387 * however, if it arises, OCSC would need to be reprogrammed at the
2388 * minimum.
2390 if (update->output_color_space)
2391 stream->output_color_space = *update->output_color_space;
2393 if (update->output_csc_transform)
2394 stream->csc_color_matrix = *update->output_csc_transform;
2396 if (update->vrr_infopacket)
2397 stream->vrr_infopacket = *update->vrr_infopacket;
2399 if (update->dpms_off)
2400 stream->dpms_off = *update->dpms_off;
2402 if (update->vsc_infopacket)
2403 stream->vsc_infopacket = *update->vsc_infopacket;
2405 if (update->vsp_infopacket)
2406 stream->vsp_infopacket = *update->vsp_infopacket;
2408 if (update->dither_option)
2409 stream->dither_option = *update->dither_option;
2411 if (update->pending_test_pattern)
2412 stream->test_pattern = *update->pending_test_pattern;
2413 /* update current stream with writeback info */
2414 if (update->wb_update) {
2417 stream->num_wb_info = update->wb_update->num_wb_info;
2418 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2419 for (i = 0; i < stream->num_wb_info; i++)
2420 stream->writeback_info[i] =
2421 update->wb_update->writeback_info[i];
2423 if (update->dsc_config) {
2424 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2425 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2426 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2427 update->dsc_config->num_slices_v != 0);
2429 /* Use a temporary context for validating the new DSC config */
2430 struct dc_state *dsc_validate_context = dc_create_state(dc);
2432 if (dsc_validate_context) {
2433 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2435 stream->timing.dsc_cfg = *update->dsc_config;
2436 stream->timing.flags.DSC = enable_dsc;
2437 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2438 stream->timing.dsc_cfg = old_dsc_cfg;
2439 stream->timing.flags.DSC = old_dsc_enabled;
2440 update->dsc_config = NULL;
2443 dc_release_state(dsc_validate_context);
2445 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2446 update->dsc_config = NULL;
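/*
 * Sketch (illustrative): a stream-level DPMS-off update as consumed above.
 * Pointer members double as "field present" flags, which is why a local
 * bool is passed by address rather than as a plain value.
 *
 *	bool dpms_off = true;
 *	struct dc_stream_update upd = { 0 };
 *
 *	upd.stream = stream;
 *	upd.dpms_off = &dpms_off;
 */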
2451 static void commit_planes_do_stream_update(struct dc *dc,
2452 struct dc_stream_state *stream,
2453 struct dc_stream_update *stream_update,
2454 enum surface_update_type update_type,
2455 struct dc_state *context)
2460 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2461 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2463 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2465 if (stream_update->periodic_interrupt0 &&
2466 dc->hwss.setup_periodic_interrupt)
2467 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2469 if (stream_update->periodic_interrupt1 &&
2470 dc->hwss.setup_periodic_interrupt)
2471 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2473 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2474 stream_update->vrr_infopacket ||
2475 stream_update->vsc_infopacket ||
2476 stream_update->vsp_infopacket) {
2477 resource_build_info_frame(pipe_ctx);
2478 dc->hwss.update_info_frame(pipe_ctx);
2481 if (stream_update->hdr_static_metadata &&
2482 stream->use_dynamic_meta &&
2483 dc->hwss.set_dmdata_attributes &&
2484 pipe_ctx->stream->dmdata_address.quad_part != 0)
2485 dc->hwss.set_dmdata_attributes(pipe_ctx);
2487 if (stream_update->gamut_remap)
2488 dc_stream_set_gamut_remap(dc, stream);
2490 if (stream_update->output_csc_transform)
2491 dc_stream_program_csc_matrix(dc, stream);
2493 if (stream_update->dither_option) {
2494 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2495 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2496 &pipe_ctx->stream->bit_depth_params);
2497 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2498 &stream->bit_depth_params,
2501 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2502 &stream->bit_depth_params,
2504 odm_pipe = odm_pipe->next_odm_pipe;
2510 if (update_type == UPDATE_TYPE_FAST)
2513 if (stream_update->dsc_config)
2514 dp_update_dsc_config(pipe_ctx);
2516 if (stream_update->pending_test_pattern) {
2517 dc_link_dp_set_test_pattern(stream->link,
2518 stream->test_pattern.type,
2519 stream->test_pattern.color_space,
2520 stream->test_pattern.p_link_settings,
2521 stream->test_pattern.p_custom_pattern,
2522 stream->test_pattern.cust_pattern_size);
2525 if (stream_update->dpms_off) {
2526 if (*stream_update->dpms_off) {
2527 core_link_disable_stream(pipe_ctx);
2528 /* for DPMS, keep acquired resources */
2529 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2530 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2532 dc->optimized_required = true;
2535 if (get_seamless_boot_stream_count(context) == 0)
2536 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2538 core_link_enable_stream(dc->current_state, pipe_ctx);
2542 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2543 bool should_program_abm = true;
2545 // if OTG funcs are defined, check whether the OTG is blanked before programming
2546 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2547 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2548 should_program_abm = false;
2550 if (should_program_abm) {
2551 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2552 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2554 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2555 pipe_ctx->stream_res.abm, stream->abm_level);
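/*
 * Sketch (illustrative): an ABM level change arrives the same way, and is
 * only programmed when the OTG is not blanked, per the check above.
 *
 *	unsigned int level = 3;		// hypothetical ABM level
 *	struct dc_stream_update upd = { 0 };
 *
 *	upd.stream = stream;
 *	upd.abm_level = &level;
 */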
2563 static void commit_planes_for_stream(struct dc *dc,
2564 struct dc_surface_update *srf_updates,
2566 struct dc_stream_state *stream,
2567 struct dc_stream_update *stream_update,
2568 enum surface_update_type update_type,
2569 struct dc_state *context)
2572 struct pipe_ctx *top_pipe_to_program = NULL;
2574 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
2575 /* The seamless boot optimization flag keeps clocks and watermarks high
2576 * until the first flip; after that, optimization is required to lower
2577 * bandwidth. Important to note: UEFI is expected to light up only a
2578 * single display on POST, therefore we only expect one stream with the
2579 * seamless boot flag set.
2581 if (stream->apply_seamless_boot_optimization) {
2582 stream->apply_seamless_boot_optimization = false;
2584 if (get_seamless_boot_stream_count(context) == 0)
2585 dc->optimized_required = true;
2589 if (update_type == UPDATE_TYPE_FULL) {
2590 #if defined(CONFIG_DRM_AMD_DC_DCN)
2591 dc_allow_idle_optimizations(dc, false);
2594 if (get_seamless_boot_stream_count(context) == 0)
2595 dc->hwss.prepare_bandwidth(dc, context);
2597 context_clock_trace(dc, context);
2600 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2601 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2603 if (!pipe_ctx->top_pipe &&
2604 !pipe_ctx->prev_odm_pipe &&
2606 pipe_ctx->stream == stream) {
2607 top_pipe_to_program = pipe_ctx;
2611 #ifdef CONFIG_DRM_AMD_DC_DCN
2612 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2613 struct pipe_ctx *mpcc_pipe;
2614 struct pipe_ctx *odm_pipe;
2616 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2617 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2618 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2622 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2623 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2624 if (should_use_dmub_lock(stream->link)) {
2625 union dmub_hw_lock_flags hw_locks = { 0 };
2626 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2628 hw_locks.bits.lock_dig = 1;
2629 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2631 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2636 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2637 top_pipe_to_program->stream_res.tg);
2640 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2641 dc->hwss.interdependent_update_lock(dc, context, true);
2643 /* Lock the top pipe while updating plane addrs, since FreeSync requires
2644 * plane addr update event triggers to be synchronized.
2645 * top_pipe_to_program is expected to never be NULL.
2647 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2651 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2653 if (surface_count == 0) {
2655 * When turning off the screen, there is no need to program the front end
2656 * a second time; just return after programming blank.
2658 if (dc->hwss.apply_ctx_for_surface)
2659 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2660 if (dc->hwss.program_front_end_for_ctx)
2661 dc->hwss.program_front_end_for_ctx(dc, context);
2663 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2664 dc->hwss.interdependent_update_lock(dc, context, false);
2666 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2668 dc->hwss.post_unlock_program_front_end(dc, context);
2672 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2673 for (i = 0; i < surface_count; i++) {
2674 struct dc_plane_state *plane_state = srf_updates[i].surface;
2675 /* set logical flag for lock/unlock use */
2676 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2677 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2678 if (!pipe_ctx->plane_state)
2680 if (pipe_ctx->plane_state != plane_state)
2682 plane_state->triplebuffer_flips = false;
2683 if (update_type == UPDATE_TYPE_FAST &&
2684 dc->hwss.program_triplebuffer != NULL &&
2685 !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
2686 /* triple buffering for VUpdate only */
2687 plane_state->triplebuffer_flips = true;
2690 if (update_type == UPDATE_TYPE_FULL) {
2691 /* force vsync flip when reconfiguring pipes to prevent underflow */
2692 plane_state->flip_immediate = false;
2697 // Update Type FULL, Surface updates
2698 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2699 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2701 if (!pipe_ctx->top_pipe &&
2702 !pipe_ctx->prev_odm_pipe &&
2704 pipe_ctx->stream == stream) {
2705 struct dc_stream_status *stream_status = NULL;
2707 if (!pipe_ctx->plane_state)
2711 if (update_type == UPDATE_TYPE_FAST)
2714 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2716 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2717 /* turn off triple buffering for a full update */
2718 dc->hwss.program_triplebuffer(
2719 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2722 stream_get_status(context, pipe_ctx->stream);
2724 if (dc->hwss.apply_ctx_for_surface)
2725 dc->hwss.apply_ctx_for_surface(
2726 dc, pipe_ctx->stream, stream_status->plane_count, context);
2729 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2730 dc->hwss.program_front_end_for_ctx(dc, context);
2731 #ifdef CONFIG_DRM_AMD_DC_DCN
2732 if (dc->debug.validate_dml_output) {
2733 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2734 struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
2735 if (cur_pipe.stream == NULL)
2738 cur_pipe.plane_res.hubp->funcs->validate_dml_output(
2739 cur_pipe.plane_res.hubp, dc->ctx,
2740 &context->res_ctx.pipe_ctx[i].rq_regs,
2741 &context->res_ctx.pipe_ctx[i].dlg_regs,
2742 &context->res_ctx.pipe_ctx[i].ttu_regs);
2748 // Update Type FAST, Surface updates
2749 if (update_type == UPDATE_TYPE_FAST) {
2750 if (dc->hwss.set_flip_control_gsl)
2751 for (i = 0; i < surface_count; i++) {
2752 struct dc_plane_state *plane_state = srf_updates[i].surface;
2754 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2755 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2757 if (pipe_ctx->stream != stream)
2760 if (pipe_ctx->plane_state != plane_state)
2763 // GSL has to be used for flip immediate
2764 dc->hwss.set_flip_control_gsl(pipe_ctx,
2765 plane_state->flip_immediate);
2768 /* Perform requested updates */
2769 for (i = 0; i < surface_count; i++) {
2770 struct dc_plane_state *plane_state = srf_updates[i].surface;
2772 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2773 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2775 if (pipe_ctx->stream != stream)
2778 if (pipe_ctx->plane_state != plane_state)
2780 /* program triple buffering after lock, based on flip type */
2781 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2782 /* only enable triple buffering for fast updates */
2783 dc->hwss.program_triplebuffer(
2784 dc, pipe_ctx, plane_state->triplebuffer_flips);
2786 if (srf_updates[i].flip_addr)
2787 dc->hwss.update_plane_addr(dc, pipe_ctx);
2792 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2793 dc->hwss.interdependent_update_lock(dc, context, false);
2795 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2797 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2798 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2799 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2800 top_pipe_to_program->stream_res.tg,
2801 CRTC_STATE_VACTIVE);
2802 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2803 top_pipe_to_program->stream_res.tg,
2805 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2806 top_pipe_to_program->stream_res.tg,
2807 CRTC_STATE_VACTIVE);
2809 if (stream && should_use_dmub_lock(stream->link)) {
2810 union dmub_hw_lock_flags hw_locks = { 0 };
2811 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2813 hw_locks.bits.lock_dig = 1;
2814 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2816 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2821 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
2822 top_pipe_to_program->stream_res.tg);
2825 if (update_type != UPDATE_TYPE_FAST)
2826 dc->hwss.post_unlock_program_front_end(dc, context);
2828 // Fire manual trigger only when bottom plane is flipped
2829 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2830 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2832 if (!pipe_ctx->plane_state)
2835 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
2836 !pipe_ctx->stream || pipe_ctx->stream != stream ||
2837 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
2838 pipe_ctx->plane_state->skip_manual_trigger)
2841 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2842 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
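/*
 * The locking choreography above, reduced to a sketch (order only; error
 * handling and fast-update shortcuts omitted):
 *
 *	lock_doublebuffer_enable()		// full update with DSC change
 *	interdependent_update_lock(true)	// or pipe_control_lock(true)
 *	commit_planes_do_stream_update()
 *	program front end and flip addresses
 *	interdependent_update_lock(false)	// or pipe_control_lock(false)
 *	wait VACTIVE -> VBLANK -> VACTIVE, then lock_doublebuffer_disable()
 *	post_unlock_program_front_end()		// non-fast updates only
 */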
2846 void dc_commit_updates_for_stream(struct dc *dc,
2847 struct dc_surface_update *srf_updates,
2849 struct dc_stream_state *stream,
2850 struct dc_stream_update *stream_update,
2851 struct dc_state *state)
2853 const struct dc_stream_status *stream_status;
2854 enum surface_update_type update_type;
2855 struct dc_state *context;
2856 struct dc_context *dc_ctx = dc->ctx;
2859 stream_status = dc_stream_get_status(stream);
2860 context = dc->current_state;
2862 update_type = dc_check_update_surfaces_for_stream(
2863 dc, srf_updates, surface_count, stream_update, stream_status);
2865 if (update_type >= update_surface_trace_level)
2866 update_surface_trace(dc, srf_updates, surface_count);
2869 if (update_type >= UPDATE_TYPE_FULL) {
2871 /* initialize scratch memory for building context */
2872 context = dc_create_state(dc);
2873 if (context == NULL) {
2874 DC_ERROR("Failed to allocate new validate context!\n");
2878 dc_resource_state_copy_construct(state, context);
2880 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2881 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
2882 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2884 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
2885 new_pipe->plane_state->force_full_update = true;
2890 for (i = 0; i < surface_count; i++) {
2891 struct dc_plane_state *surface = srf_updates[i].surface;
2893 copy_surface_update_to_plane(surface, &srf_updates[i]);
2895 if (update_type >= UPDATE_TYPE_MED) {
2896 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2897 struct pipe_ctx *pipe_ctx =
2898 &context->res_ctx.pipe_ctx[j];
2900 if (pipe_ctx->plane_state != surface)
2903 resource_build_scaling_params(pipe_ctx);
2908 copy_stream_update_to_stream(dc, context, stream, stream_update);
2910 if (update_type >= UPDATE_TYPE_FULL) {
2911 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2912 DC_ERROR("Mode validation failed for stream update!\n");
2913 dc_release_state(context);
2918 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
2920 commit_planes_for_stream(
2928 /* update current_state */
2929 if (dc->current_state != context) {
2931 struct dc_state *old = dc->current_state;
2933 dc->current_state = context;
2934 dc_release_state(old);
2936 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2937 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2939 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
2940 pipe_ctx->plane_state->force_full_update = false;
2943 /* use current_state to update watermarks, etc. */
2944 if (update_type >= UPDATE_TYPE_FULL) {
2945 dc_post_update_surfaces_to_stream(dc);
2947 if (dc_ctx->dce_version >= DCE_VERSION_MAX)
2948 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2950 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
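/*
 * End-to-end usage sketch (hypothetical DM code): committing a one-surface
 * flip against the current state. `plane`, `flip` and `stream` are as in
 * the earlier sketches.
 *
 *	struct dc_surface_update srf = { .surface = plane, .flip_addr = &flip };
 *	struct dc_stream_update s_upd = { .stream = stream };
 *
 *	dc_commit_updates_for_stream(dc, &srf, 1, stream, &s_upd,
 *				     dc->current_state);
 */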
2957 uint8_t dc_get_current_stream_count(struct dc *dc)
2959 return dc->current_state->stream_count;
2962 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2964 if (i < dc->current_state->stream_count)
2965 return dc->current_state->streams[i];
2969 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
2972 struct dc_context *ctx = link->ctx;
2974 for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
2975 if (ctx->dc->current_state->streams[i]->link == link)
2976 return ctx->dc->current_state->streams[i];
2982 enum dc_irq_source dc_interrupt_to_irq_source(
2987 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
2991 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
2993 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2999 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3002 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3004 dal_irq_service_ack(dc->res_pool->irqs, src);
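/*
 * Usage sketch (illustrative): translating a hw interrupt id, enabling the
 * source, and acking it from a handler. `my_src_id` and `my_ext_id` are
 * hypothetical.
 *
 *	enum dc_irq_source src;
 *
 *	src = dc_interrupt_to_irq_source(dc, my_src_id, my_ext_id);
 *	if (dc_interrupt_set(dc, src, true))
 *		;	// source enabled
 *	...
 *	dc_interrupt_ack(dc, src);
 */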
3007 void dc_power_down_on_boot(struct dc *dc)
3009 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3010 dc->hwss.power_down_on_boot)
3011 dc->hwss.power_down_on_boot(dc);
3014 void dc_set_power_state(
3016 enum dc_acpi_cm_power_state power_state)
3018 struct kref refcount;
3019 struct display_mode_lib *dml;
3021 if (!dc->current_state)
3024 switch (power_state) {
3025 case DC_ACPI_CM_POWER_STATE_D0:
3026 dc_resource_state_construct(dc, dc->current_state);
3028 if (dc->ctx->dmub_srv)
3029 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3031 dc->hwss.init_hw(dc);
3033 if (dc->hwss.init_sys_ctx != NULL &&
3034 dc->vm_pa_config.valid) {
3035 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3040 ASSERT(dc->current_state->stream_count == 0);
3041 /* Zero out the current context so that on resume we start with
3042 * a clean state, and dc hw programming optimizations will not
3043 * cause any trouble.
3045 dml = kzalloc(sizeof(struct display_mode_lib),
3052 /* Preserve refcount */
3053 refcount = dc->current_state->refcount;
3054 /* Preserve display mode lib */
3055 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3057 dc_resource_state_destruct(dc->current_state);
3058 memset(dc->current_state, 0,
3059 sizeof(*dc->current_state));
3061 dc->current_state->refcount = refcount;
3062 dc->current_state->bw_ctx.dml = *dml;
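/*
 * Usage sketch (illustrative): suspend/resume ordering as seen from the DM.
 *
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);	// suspend
 *	...
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);	// resume
 */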
3070 void dc_resume(struct dc *dc)
3074 for (i = 0; i < dc->link_count; i++)
3075 core_link_resume(dc->links[i]);
3078 bool dc_is_dmcu_initialized(struct dc *dc)
3080 struct dmcu *dmcu = dc->res_pool->dmcu;
3083 return dmcu->funcs->is_dmcu_initialized(dmcu);
3089 uint32_t link_index,
3090 struct i2c_command *cmd)
3093 struct dc_link *link = dc->links[link_index];
3094 struct ddc_service *ddc = link->ddc;
3095 return dce_i2c_submit_command(
3101 bool dc_submit_i2c_oem(
3103 struct i2c_command *cmd)
3105 struct ddc_service *ddc = dc->res_pool->oem_device;
3106 return dce_i2c_submit_command(
3112 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3114 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3115 BREAK_TO_DEBUGGER();
3119 dc_sink_retain(sink);
3121 dc_link->remote_sinks[dc_link->sink_count] = sink;
3122 dc_link->sink_count++;
3128 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3130 * EDID length is in bytes
3132 struct dc_sink *dc_link_add_remote_sink(
3133 struct dc_link *link,
3134 const uint8_t *edid,
3136 struct dc_sink_init_data *init_data)
3138 struct dc_sink *dc_sink;
3139 enum dc_edid_status edid_status;
3141 if (len > DC_MAX_EDID_BUFFER_SIZE) {
3142 dm_error("Max EDID buffer size breached!\n");
3147 BREAK_TO_DEBUGGER();
3151 if (!init_data->link) {
3152 BREAK_TO_DEBUGGER();
3156 dc_sink = dc_sink_create(init_data);
3161 memmove(dc_sink->dc_edid.raw_edid, edid, len);
3162 dc_sink->dc_edid.length = len;
3164 if (!link_add_remote_sink_helper(
3169 edid_status = dm_helpers_parse_edid_caps(
3172 &dc_sink->edid_caps);
3175 * Treat device as no EDID device if EDID
3176 * parsing fails
3178 if (edid_status != EDID_OK) {
3179 dc_sink->dc_edid.length = 0;
3180 dm_error("Bad EDID, status %d!\n", edid_status);
3186 dc_sink_release(dc_sink);
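/*
 * Usage sketch (illustrative, e.g. an MST branch reporting a new
 * downstream display; `edid_buf` and `edid_len` are hypothetical):
 *
 *	struct dc_sink_init_data init = { 0 };
 *	struct dc_sink *sink;
 *
 *	init.link = link;
 *	init.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
 *	sink = dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *	if (!sink)
 *		;	// allocation, EDID size or helper check failed
 */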
3191 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3193 * Note that this just removes the struct dc_sink - it doesn't
3194 * program hardware or alter other members of dc_link
3196 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3200 if (!link->sink_count) {
3201 BREAK_TO_DEBUGGER();
3205 for (i = 0; i < link->sink_count; i++) {
3206 if (link->remote_sinks[i] == sink) {
3207 dc_sink_release(sink);
3208 link->remote_sinks[i] = NULL;
3210 /* shrink array to remove the empty slot */
3211 while (i < link->sink_count - 1) {
3212 link->remote_sinks[i] = link->remote_sinks[i+1];
3215 link->remote_sinks[i] = NULL;
3222 void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream)
3226 for (i = 0; i < dc->res_pool->pipe_count; i++)
3227 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
3228 struct timing_generator *tg =
3229 dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
3230 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
3235 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3237 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3238 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3239 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3240 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3241 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3242 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3243 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3244 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3245 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3247 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3249 if (dc->hwss.set_clock)
3250 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3251 return DC_ERROR_UNEXPECTED;
3253 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3255 if (dc->hwss.get_clock)
3256 dc->hwss.get_clock(dc, clock_type, clock_cfg);
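/*
 * Usage sketch (illustrative): reading a clock then writing it back through
 * the optional hw sequencer hooks. DC_CLOCK_TYPE_DISPCLK and the
 * current_clock_khz field of struct dc_clock_config are assumed here.
 *
 *	struct dc_clock_config cfg = { 0 };
 *
 *	dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);
 *	if (dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK,
 *			 cfg.current_clock_khz, 0) != DC_OK)
 *		;	// hook missing or request rejected
 */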
3259 /* enable/disable eDP PSR without specifying a stream for eDP */
3260 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3264 for (i = 0; i < dc->current_state->stream_count ; i++) {
3265 struct dc_link *link;
3266 struct dc_stream_state *stream = dc->current_state->streams[i];
3268 link = stream->link;
3272 if (link->psr_settings.psr_feature_enabled) {
3273 if (enable && !link->psr_settings.psr_allow_active)
3274 return dc_link_set_psr_allow_active(link, true, false, false);
3275 else if (!enable && link->psr_settings.psr_allow_active)
3276 return dc_link_set_psr_allow_active(link, false, true, false);
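/*
 * Usage sketch (illustrative): a system-wide PSR toggle bracketing a
 * latency-sensitive operation.
 *
 *	dc_set_psr_allow_active(dc, false);	// exit PSR on all eDP links
 *	...
 *	dc_set_psr_allow_active(dc, true);	// allow PSR again
 */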
3283 #if defined(CONFIG_DRM_AMD_DC_DCN)
3285 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3287 if (dc->debug.disable_idle_power_optimizations)
3290 if (dc->clk_mgr->funcs->is_smu_present)
3291 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3294 if (allow == dc->idle_optimizations_allowed)
3297 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3298 dc->idle_optimizations_allowed = allow;
3302 * blank all streams, and set min and max memory clock to
3303 * lowest and highest DPM level, respectively
3305 void dc_unlock_memory_clock_frequency(struct dc *dc)
3309 for (i = 0; i < MAX_PIPES; i++)
3310 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3311 core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3313 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3314 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3318 * set min memory clock to the min required for current mode,
3319 * max to maxDPM, and unblank streams
3321 void dc_lock_memory_clock_frequency(struct dc *dc)
3325 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3326 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3327 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3329 for (i = 0; i < MAX_PIPES; i++)
3330 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3331 core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
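/*
 * Pairing sketch (illustrative): the two helpers above bracket work that
 * must run with the memory clock range fully open.
 *
 *	dc_unlock_memory_clock_frequency(dc);	// streams blanked, memclk free
 *	...					// e.g. memory retraining
 *	dc_lock_memory_clock_frequency(dc);	// restore mode minimum, unblank
 */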
3334 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3335 struct dc_cursor_attributes *cursor_attr)
3337 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3342 /* cleanup on driver unload */
3343 void dc_hardware_release(struct dc *dc)
3345 if (dc->hwss.hardware_release)
3346 dc->hwss.hardware_release(dc);
3351 *****************************************************************************
3352 * Function: dc_enable_dmub_notifications
3355 * Returns whether dmub notifications can be enabled
3358 * [in] dc: dc structure
3361 * True to enable dmub notifications, False otherwise
3362 *****************************************************************************
3364 bool dc_enable_dmub_notifications(struct dc *dc)
3366 /* dmub aux needs dmub notifications to be enabled */
3367 return dc->debug.enable_dmub_aux_for_legacy_ddc;
3371 *****************************************************************************
3372 * Function: dc_process_dmub_aux_transfer_async
3375 * Submits aux command to dmub via inbox message
3376 * Sets port index appropriately for legacy DDC
3379 * [in] dc: dc structure
3380 * [in] link_index: link index
3381 * [in] payload: aux payload
3384 * True if successful, False if failure
3385 *****************************************************************************
3387 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3388 uint32_t link_index,
3389 struct aux_payload *payload)
3392 union dmub_rb_cmd cmd = {0};
3393 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3395 ASSERT(payload->length <= 16);
3397 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3398 cmd.dp_aux_access.header.payload_bytes = 0;
3399 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3400 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3401 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3402 cmd.dp_aux_access.aux_control.timeout = 0;
3403 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3404 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3405 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3407 /* set aux action */
3408 if (payload->i2c_over_aux) {
3409 if (payload->write) {
3411 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3413 action = DP_AUX_REQ_ACTION_I2C_WRITE;
3416 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3418 action = DP_AUX_REQ_ACTION_I2C_READ;
3422 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3424 action = DP_AUX_REQ_ACTION_DPCD_READ;
3427 cmd.dp_aux_access.aux_control.dpaux.action = action;
3429 if (payload->length && payload->write) {
3430 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3436 dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3437 dc_dmub_srv_cmd_execute(dmub_srv);
3438 dc_dmub_srv_wait_idle(dmub_srv);
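/*
 * Payload sketch (illustrative): a 1-byte native-aux DPCD read submitted
 * through the DMUB inbox. The DPCD sink-count register (0x200) is just an
 * example offset.
 *
 *	uint8_t data = 0;
 *	struct aux_payload payload = { 0 };
 *
 *	payload.i2c_over_aux = false;	// native aux -> DPCD read action
 *	payload.write = false;
 *	payload.address = 0x200;
 *	payload.length = 1;
 *	payload.data = &data;
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */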
3444 *****************************************************************************
3445 * Function: dc_disable_accelerated_mode
3448 * Disable accelerated mode
3451 * [in] dc: dc structure
3453 *****************************************************************************
3455 void dc_disable_accelerated_mode(struct dc *dc)
3457 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);