/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
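
/*
 * Illustrative bring-up sketch (not part of this file): a display manager
 * (dm) typically drives the structs above through the public entry points
 * defined below. The dm-side variable names here are hypothetical.
 *
 *   struct dc_init_data init = {0};
 *   struct dc *dc;
 *
 *   init.driver = adev;           // OS driver backpointer (assumed)
 *   init.asic_id = asic_id;       // filled in by the dm (assumed)
 *   dc = dc_create(&init);        // builds links, resource pool, current_state
 *   dc_hardware_init(dc);         // programs the hardware to a known state
 *   ...
 *   dc_destroy(&dc);              // tears everything down on driver unload
 */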

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i] != NULL)
                        dc->link_srv->destroy_link(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = dc->link_srv->create_link(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = dc->link_srv->create_link(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia;
        unsigned int num_dig_link_enc;
        int i;

        if (!dc->res_pool)
                return;

        num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 *  @dc:     dc reference
 *  @stream: Initial dc stream state
 *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 *  Looks up the pipe context of dc_stream_state and updates the
 *  vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 *  Rate), a power-saving feature that targets reducing the panel
 *  refresh rate while the screen is static.
 *
 *  Return: %true if the pipe context is found and adjusted;
 *          %false if the pipe context is not found.
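 *
 *  A hypothetical caller sketch (illustrative only; how the dm derives the
 *  min/max values is an assumption, not taken from this file):
 *
 *    struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *    adjust.v_total_min = new_v_total_min;  // computed by the dm for the
 *    adjust.v_total_max = new_v_total_max;  // desired refresh-rate range
 *    if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *            ;  // stream has no active pipe, or optimizations are pending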
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;

        /*
         * Don't adjust DRR while there are bandwidth optimizations pending to
         * avoid conflicting with firmware updates.
         */
        if (dc->ctx->dce_version > DCE_VERSION_MAX)
                if (dc->optimized_required || dc->wm_optimized_required)
                        return false;

        dc_exit_ips_for_hw_access(dc);

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        return true;
                }
        }
        return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] the last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
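 *
 * A hypothetical caller sketch (illustrative only):
 *
 *   uint32_t last_vtotal = 0;
 *
 *   if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *           ;  // last_vtotal now holds the VTOTAL last programmed by DRR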
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;

        int i = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        union dmub_rb_cmd cmd = {0};

        cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
        cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

        if (is_stop) {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
                cmd.secure_display.roi_info.x_start = rect->x;
                cmd.secure_display.roi_info.y_start = rect->y;
                cmd.secure_display.roi_info.x_end = rect->x + rect->width;
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }

        dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        if (is_stop)
                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        else
                dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
                struct rect *rect, bool is_stop)
{
        struct dmcu *dmcu;
        struct dc_dmub_srv *dmub_srv;
        struct otg_phy_mux mux_mapping;
        struct pipe_ctx *pipe;
        int i;
        struct dc *dc = stream->ctx->dc;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
        mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

        dmcu = dc->res_pool->dmcu;
        dmub_srv = dc->ctx->dmub_srv;

        /* forward to dmub */
        if (dmub_srv)
                dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
        /* forward to dmcu */
        else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
                dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
        else
                return false;

        return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
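 *
 * A hypothetical caller sketch (illustrative only; enables continuous
 * full-frame capture by passing no explicit window):
 *
 *   if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *           ;  // stream not found, or timing generator lacks CRC support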
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        pipe = resource_get_otg_master_for_stream(
                        &dc->current_state->res_ctx, stream);

        /* Stream not found */
        if (pipe == NULL)
                return false;

        dc_exit_ips_for_hw_access(dc);

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
        param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled;
 * %true otherwise.
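 *
 * A hypothetical caller sketch (illustrative only), pairing with the
 * dc_stream_configure_crc() example above:
 *
 *   uint32_t r_cr = 0, g_y = 0, b_cb = 0;
 *
 *   if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *           ;  // compare against a reference CRC on the caller's side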
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        dc_exit_ips_for_hw_access(stream->ctx->dc);

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_state_release(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->link_srv)
                link_destroy_link_service(&dc->link_srv);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        kfree(dc->ctx->logger);
        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;
        dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
        dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

        /* Create logger */
        dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

        if (!dc_ctx->logger) {
                kfree(dc_ctx);
                return false;
        }

        dc_ctx->logger->dev = adev_to_drm(init_params->driver);
        dc->dml.logger = dc_ctx->logger;

        dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                kfree(dc_ctx);
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        dc->link_srv = link_create_link_service();
        if (!dc->link_srv)
                return false;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
        if (dc->caps.max_optimizable_video_width == 0)
                dc->caps.max_optimizable_video_width = 5120;
        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */

        dc->current_state = dc_state_create(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
                                          struct dc_state *context,
                                          struct dc_stream_state *stream,
                                          bool lock)
{
        int i;

        /* Use the interdependent update lock if implemented; DCE110 lacks it,
         * so fall back to per-pipe locking.
         */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
        if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
                memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

                if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
                        get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                        get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
                        get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else {
                        if (dc->ctx->dce_version < DCN_VERSION_2_0)
                                color_space_to_black_color(
                                        dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
                }
                if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
                        if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
                                get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
                                get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
                                get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                }
        }
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_state_create_current_copy(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        if (dangling_context == NULL)
                return;

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change = false;

                if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
                        (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
                else
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change &&
                                dc->current_state->stream_count != context->stream_count)
                        should_disable = true;

                if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
                                !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
                        struct pipe_ctx *old_pipe, *new_pipe;

                        old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        new_pipe = &context->res_ctx.pipe_ctx[i];

                        if (old_pipe->plane_state && !new_pipe->plane_state)
                                should_disable = true;
                }

                if (should_disable && old_stream) {
                        bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
                         * phantom OTG so the disable programming gets the double buffer
                         * update. Otherwise the pipe will be left in a partially disabled
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
                        if (is_phantom) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;
                                        struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

                                        main_pipe_width = old_paired_stream->dst.width;
                                        main_pipe_height = old_paired_stream->dst.height;
                                        if (dc->hwss.blank_phantom)
                                                dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }

                        if (is_phantom)
                                dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
                        else
                                dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (pipe->stream && pipe->plane_state) {
                                set_p_state_switch_method(dc, context, pipe);
                                dc_update_visual_confirm_color(dc, context, pipe);
                        }

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        /* We need to put the phantom OTG back into its default (disabled) state or we
                         * can get corruption when transitioning from one SubVP config to a different one.
                         * The OTG is set to disable on falling edge of VUPDATE so the plane disable
                         * will still get its double buffer update.
                         */
                        if (is_phantom) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* Check whether the timing changed; if so, disable the stream */
1251         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1252                 struct dc_stream_state *stream = NULL;
1253                 struct dc_link *link = NULL;
1254                 struct pipe_ctx *pipe = NULL;
1255
1256                 pipe = &context->res_ctx.pipe_ctx[i];
1257                 stream = pipe->stream;
1258                 if (stream == NULL)
1259                         continue;
1260
1261                 if (stream->apply_seamless_boot_optimization)
1262                         continue;
1263
1264                 // only looking for first odm pipe
1265                 if (pipe->prev_odm_pipe)
1266                         continue;
1267
1268                 if (stream->link->local_sink &&
1269                         stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1270                         link = stream->link;
1271                 }
1272
1273                 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1274                         unsigned int enc_inst, tg_inst = 0;
1275                         unsigned int pix_clk_100hz;
1276
1277                         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1278                         if (enc_inst != ENGINE_ID_UNKNOWN) {
1279                                 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1280                                         if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1281                                                 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1282                                                         dc->res_pool->stream_enc[j]);
1283                                                 break;
1284                                         }
1285                                 }
1286
1287                                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1288                                         dc->res_pool->dp_clock_source,
1289                                         tg_inst, &pix_clk_100hz);
1290
1291                                 if (link->link_status.link_active) {
1292                                         uint32_t requested_pix_clk_100hz =
1293                                                 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1294
1295                                         if (pix_clk_100hz != requested_pix_clk_100hz) {
1296                                                 dc->link_srv->set_dpms_off(pipe);
1297                                                 pipe->stream->dpms_off = false;
1298                                         }
1299                                 }
1300                         }
1301                 }
1302         }
1303 }
1304
1305 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1306 {
1307         int i;
1308         PERF_TRACE();
1309         for (i = 0; i < MAX_PIPES; i++) {
1310                 int count = 0;
1311                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1312
1313                 if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1314                         continue;
1315
1316                 /* Timeout 100 ms */
1317                 while (count < 100000) {
1318                         /* Must set to false to start with, due to OR in update function */
1319                         pipe->plane_state->status.is_flip_pending = false;
1320                         dc->hwss.update_pending_status(pipe);
1321                         if (!pipe->plane_state->status.is_flip_pending)
1322                                 break;
1323                         udelay(1);
1324                         count++;
1325                 }
1326                 ASSERT(!pipe->plane_state->status.is_flip_pending);
1327         }
1328         PERF_TRACE();
1329 }
1330
1331 /* Public functions */
1332
1333 struct dc *dc_create(const struct dc_init_data *init_params)
1334 {
1335         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1336         unsigned int full_pipe_count;
1337
1338         if (!dc)
1339                 return NULL;
1340
1341         if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1342                 if (!dc_construct_ctx(dc, init_params))
1343                         goto destruct_dc;
1344         } else {
1345                 if (!dc_construct(dc, init_params))
1346                         goto destruct_dc;
1347
1348                 full_pipe_count = dc->res_pool->pipe_count;
1349                 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1350                         full_pipe_count--;
1351                 dc->caps.max_streams = min(
1352                                 full_pipe_count,
1353                                 dc->res_pool->stream_enc_count);
1354
1355                 dc->caps.max_links = dc->link_count;
1356                 dc->caps.max_audios = dc->res_pool->audio_count;
1357                 dc->caps.linear_pitch_alignment = 64;
1358
1359                 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1360
1361                 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1362
1363                 if (dc->res_pool->dmcu != NULL)
1364                         dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1365         }
1366
1367         dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1368         dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1369         dc->clk_reg_offsets = init_params->clk_reg_offsets;
1370
1371         /* Populate versioning information */
1372         dc->versions.dc_ver = DC_VER;
1373
1374         dc->build_id = DC_BUILD_ID;
1375
1376         DC_LOG_DC("Display Core initialized\n");
1377
1378
1379
1380         return dc;
1381
1382 destruct_dc:
1383         dc_destruct(dc);
1384         kfree(dc);
1385         return NULL;
1386 }
1387
1388 static void detect_edp_presence(struct dc *dc)
1389 {
1390         struct dc_link *edp_links[MAX_NUM_EDP];
1391         struct dc_link *edp_link = NULL;
1392         enum dc_connection_type type;
1393         int i;
1394         int edp_num;
1395
1396         dc_get_edp_links(dc, edp_links, &edp_num);
1397         if (!edp_num)
1398                 return;
1399
1400         for (i = 0; i < edp_num; i++) {
1401                 edp_link = edp_links[i];
1402                 if (dc->config.edp_not_connected) {
1403                         edp_link->edp_sink_present = false;
1404                 } else {
1405                         dc_link_detect_connection_type(edp_link, &type);
1406                         edp_link->edp_sink_present = (type != dc_connection_none);
1407                 }
1408         }
1409 }
1410
1411 void dc_hardware_init(struct dc *dc)
1412 {
1413         detect_edp_presence(dc);
1415         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1416                 dc->hwss.init_hw(dc);
1417 }
1418
1419 void dc_init_callbacks(struct dc *dc,
1420                 const struct dc_callback_init *init_params)
1421 {
1422         dc->ctx->cp_psp = init_params->cp_psp;
1423 }
1424
1425 void dc_deinit_callbacks(struct dc *dc)
1426 {
1427         memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1428 }
1429
1430 void dc_destroy(struct dc **dc)
1431 {
1432         dc_destruct(*dc);
1433         kfree(*dc);
1434         *dc = NULL;
1435 }
1436
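/*
 * Collect the pipes whose streams request a triggered CRTC reset, excluding
 * any stream that is itself the reset event source, and hand them to the
 * per-frame CRTC position reset hook as one group.
 */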
1437 static void enable_timing_multisync(
1438                 struct dc *dc,
1439                 struct dc_state *ctx)
1440 {
1441         int i, multisync_count = 0;
1442         int pipe_count = dc->res_pool->pipe_count;
1443         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1444
1445         for (i = 0; i < pipe_count; i++) {
1446                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1447                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1448                         continue;
1449                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1450                         continue;
1451                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1452                 multisync_count++;
1453         }
1454
1455         if (multisync_count > 0) {
1456                 dc->hwss.enable_per_frame_crtc_position_reset(
1457                         dc, multisync_count, multisync_pipes);
1458         }
1459 }
1460
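/*
 * Partition top-level pipes into groups whose streams are either fully
 * timing-synchronizable or vblank-synchronizable, promote the first unblanked
 * pipe of each group to master, drop pipes that are already in sync, and
 * program each remaining group through the corresponding hwss hook.
 */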
1461 static void program_timing_sync(
1462                 struct dc *dc,
1463                 struct dc_state *ctx)
1464 {
1465         int i, j, k;
1466         int group_index = 0;
1467         int num_group = 0;
1468         int pipe_count = dc->res_pool->pipe_count;
1469         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1470
1471         for (i = 0; i < pipe_count; i++) {
1472                 if (!ctx->res_ctx.pipe_ctx[i].stream
1473                                 || ctx->res_ctx.pipe_ctx[i].top_pipe
1474                                 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1475                         continue;
1476
1477                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1478         }
1479
1480         for (i = 0; i < pipe_count; i++) {
1481                 int group_size = 1;
1482                 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1483                 struct pipe_ctx *pipe_set[MAX_PIPES];
1484
1485                 if (!unsynced_pipes[i])
1486                         continue;
1487
1488                 pipe_set[0] = unsynced_pipes[i];
1489                 unsynced_pipes[i] = NULL;
1490
1491                 /* Add tg to the set, search rest of the tg's for ones with
1492                  * same timing, add all tgs with same timing to the group
1493                  */
1494                 for (j = i + 1; j < pipe_count; j++) {
1495                         if (!unsynced_pipes[j])
1496                                 continue;
1497                         if (sync_type != TIMING_SYNCHRONIZABLE &&
1498                                 dc->hwss.enable_vblanks_synchronization &&
1499                                 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1500                                 resource_are_vblanks_synchronizable(
1501                                         unsynced_pipes[j]->stream,
1502                                         pipe_set[0]->stream)) {
1503                                 sync_type = VBLANK_SYNCHRONIZABLE;
1504                                 pipe_set[group_size] = unsynced_pipes[j];
1505                                 unsynced_pipes[j] = NULL;
1506                                 group_size++;
1507                         } else if (sync_type != VBLANK_SYNCHRONIZABLE &&
1509                                 resource_are_streams_timing_synchronizable(
1510                                         unsynced_pipes[j]->stream,
1511                                         pipe_set[0]->stream)) {
1512                                 sync_type = TIMING_SYNCHRONIZABLE;
1513                                 pipe_set[group_size] = unsynced_pipes[j];
1514                                 unsynced_pipes[j] = NULL;
1515                                 group_size++;
1516                         }
1517                 }
1518
1519                 /* set first unblanked pipe as master */
1520                 for (j = 0; j < group_size; j++) {
1521                         bool is_blanked;
1522
1523                         if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1524                                 is_blanked =
1525                                         pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1526                         else
1527                                 is_blanked =
1528                                         pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1529                         if (!is_blanked) {
1530                                 if (j == 0)
1531                                         break;
1532
1533                                 swap(pipe_set[0], pipe_set[j]);
1534                                 break;
1535                         }
1536                 }
1537
1538                 for (k = 0; k < group_size; k++) {
1539                         struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
1540
1541                         status->timing_sync_info.group_id = num_group;
1542                         status->timing_sync_info.group_size = group_size;
1543                         status->timing_sync_info.master = (k == 0);
1547
1548                 }
1549
1550                 /* remove any other unblanked pipes as they have already been synced */
1551                 if (dc->config.use_pipe_ctx_sync_logic) {
1552                         /* check pipe's syncd to decide which pipe to be removed */
1553                         for (j = 1; j < group_size; j++) {
1554                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1555                                         group_size--;
1556                                         pipe_set[j] = pipe_set[group_size];
1557                                         j--;
1558                                 } else
1559                                         /* link slave pipe's syncd with master pipe */
1560                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1561                         }
1562                 } else {
1563                         /* remove any other unblanked pipes, as they have already been synced; j resumes from the master-selection loop above */
1564                         for (j = j + 1; j < group_size; j++) {
1565                                 bool is_blanked;
1566
1567                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1568                                         is_blanked =
1569                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1570                                 else
1571                                         is_blanked =
1572                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1573                                 if (!is_blanked) {
1574                                         group_size--;
1575                                         pipe_set[j] = pipe_set[group_size];
1576                                         j--;
1577                                 }
1578                         }
1579                 }
1580
1581                 if (group_size > 1) {
1582                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1583                                 dc->hwss.enable_timing_synchronization(
1584                                         dc, ctx, group_index, group_size, pipe_set);
1585                         } else if (sync_type == VBLANK_SYNCHRONIZABLE) {
1586                                 dc->hwss.enable_vblanks_synchronization(
1587                                         dc, group_index, group_size, pipe_set);
1588                         }
1590                         group_index++;
1591                 }
1592                 num_group++;
1593         }
1594 }
1595
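/*
 * Returns true when the requested stream set differs from dc->current_state,
 * or when any requested stream's link no longer has valid link state (so a
 * commit is needed even for an otherwise identical set).
 */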
1596 static bool streams_changed(struct dc *dc,
1597                             struct dc_stream_state *streams[],
1598                             uint8_t stream_count)
1599 {
1600         uint8_t i;
1601
1602         if (stream_count != dc->current_state->stream_count)
1603                 return true;
1604
1605         for (i = 0; i < dc->current_state->stream_count; i++) {
1606                 if (dc->current_state->streams[i] != streams[i])
1607                         return true;
1608                 if (!streams[i]->link->link_state_valid)
1609                         return true;
1610         }
1611
1612         return false;
1613 }
1614
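/*
 * Compare the timing that VBIOS/GOP left running on the hardware against the
 * timing about to be set. Only when every field matches can the first modeset
 * be skipped (seamless boot); any mismatch forces a full reprogram.
 */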
1615 bool dc_validate_boot_timing(const struct dc *dc,
1616                                 const struct dc_sink *sink,
1617                                 struct dc_crtc_timing *crtc_timing)
1618 {
1619         struct timing_generator *tg;
1620         struct stream_encoder *se = NULL;
1621
1622         struct dc_crtc_timing hw_crtc_timing = {0};
1623
1624         struct dc_link *link = sink->link;
1625         unsigned int i, enc_inst, tg_inst = 0;
1626
1627         /* Support seamless boot on EDP displays only */
1628         if (sink->sink_signal != SIGNAL_TYPE_EDP)
1629                 return false;
1631
1632         if (dc->debug.force_odm_combine)
1633                 return false;
1634
1635         /* Check for enabled DIG to identify enabled display */
1636         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1637                 return false;
1638
1639         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1640
1641         if (enc_inst == ENGINE_ID_UNKNOWN)
1642                 return false;
1643
1644         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1645                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1646
1647                         se = dc->res_pool->stream_enc[i];
1648
1649                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1650                                 dc->res_pool->stream_enc[i]);
1651                         break;
1652                 }
1653         }
1654
1655         /* no stream encoder matched the DIG frontend, so tg_inst was not found */
1656         if (i == dc->res_pool->stream_enc_count)
1657                 return false;
1658
1659         if (tg_inst >= dc->res_pool->timing_generator_count)
1660                 return false;
1661
1662         if (tg_inst != link->link_enc->preferred_engine)
1663                 return false;
1664
1665         tg = dc->res_pool->timing_generators[tg_inst];
1666
1667         if (!tg->funcs->get_hw_timing)
1668                 return false;
1669
1670         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1671                 return false;
1672
1673         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1674                 return false;
1675
1676         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1677                 return false;
1678
1679         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1680                 return false;
1681
1682         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1683                 return false;
1684
1685         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1686                 return false;
1687
1688         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1689                 return false;
1690
1691         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1692                 return false;
1693
1694         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1695                 return false;
1696
1697         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1698                 return false;
1699
1700         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1701                 return false;
1702
1703         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1704                 return false;
1705
1706         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1707                 return false;
1708
1709         /* block DSC for now, as VBIOS does not currently support DSC timings */
1710         if (crtc_timing->flags.DSC)
1711                 return false;
1712
1713         if (dc_is_dp_signal(link->connector_signal)) {
1714                 unsigned int pix_clk_100hz;
1715                 uint32_t numOdmPipes = 1;
1716                 uint32_t id_src[4] = {0};
1717
1718                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1719                         dc->res_pool->dp_clock_source,
1720                         tg_inst, &pix_clk_100hz);
1721
1722                 if (tg->funcs->get_optc_source)
1723                         tg->funcs->get_optc_source(tg,
1724                                                 &numOdmPipes, &id_src[0], &id_src[1]);
1725
1726                 if (numOdmPipes == 2)
1727                         pix_clk_100hz *= 2;
1728                 if (numOdmPipes == 4)
1729                         pix_clk_100hz *= 4;
1730
1731                 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1732                 // slightly due to rounding issues in 10 kHz units.
1733                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1734                         return false;
1735
1736                 if (!se->funcs->dp_get_pixel_format)
1737                         return false;
1738
1739                 if (!se->funcs->dp_get_pixel_format(
1740                         se,
1741                         &hw_crtc_timing.pixel_encoding,
1742                         &hw_crtc_timing.display_color_depth))
1743                         return false;
1744
1745                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1746                         return false;
1747
1748                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1749                         return false;
1750         }
1751
1752         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
1753                 return false;
1755
1756         if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1757                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1758                 return false;
1759         }
1760
1761         return true;
1762 }
1763
1764 static inline bool should_update_pipe_for_stream(
1765                 struct dc_state *context,
1766                 struct pipe_ctx *pipe_ctx,
1767                 struct dc_stream_state *stream)
1768 {
1769         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1770 }
1771
1772 static inline bool should_update_pipe_for_plane(
1773                 struct dc_state *context,
1774                 struct pipe_ctx *pipe_ctx,
1775                 struct dc_plane_state *plane_state)
1776 {
1777         return (pipe_ctx->plane_state == plane_state);
1778 }
1779
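/*
 * Re-apply stereo setup on every pipe that drives one of the given streams;
 * when @context is NULL, dc->current_state is used instead.
 */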
1780 void dc_enable_stereo(
1781         struct dc *dc,
1782         struct dc_state *context,
1783         struct dc_stream_state *streams[],
1784         uint8_t stream_count)
1785 {
1786         int i, j;
1787         struct pipe_ctx *pipe;
1788
1789         dc_exit_ips_for_hw_access(dc);
1790
1791         for (i = 0; i < MAX_PIPES; i++) {
1792                 if (context != NULL) {
1793                         pipe = &context->res_ctx.pipe_ctx[i];
1794                 } else {
1795                         context = dc->current_state;
1796                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1797                 }
1798
1799                 for (j = 0; pipe && j < stream_count; j++)  {
1800                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1801                                 dc->hwss.setup_stereo)
1802                                 dc->hwss.setup_stereo(pipe, dc);
1803                 }
1804         }
1805 }
1806
1807 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1808 {
1809         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1810                 dc_exit_ips_for_hw_access(dc);
1811
1812                 enable_timing_multisync(dc, context);
1813                 program_timing_sync(dc, context);
1814         }
1815 }
1816
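/*
 * Build a bitmask with bit i set when pipe i drives a stream; streams on
 * pipes 0 and 2, for example, yield a mask of 0x5. Used below to tell DMUB
 * firmware which pipes are active.
 */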
1817 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1818 {
1819         int i;
1820         unsigned int stream_mask = 0;
1821
1822         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1823                 if (context->res_ctx.pipe_ctx[i].stream)
1824                         stream_mask |= 1 << i;
1825         }
1826
1827         return stream_mask;
1828 }
1829
1830 void dc_z10_restore(const struct dc *dc)
1831 {
1832         if (dc->hwss.z10_restore)
1833                 dc->hwss.z10_restore(dc);
1834 }
1835
1836 void dc_z10_save_init(struct dc *dc)
1837 {
1838         if (dc->hwss.z10_save_init)
1839                 dc->hwss.z10_save_init(dc);
1840 }
1841
1842 /**
1843  * dc_commit_state_no_check - Apply context to the hardware
1844  *
1845  * @dc: DC object with the current state to be updated
1846  * @context: New state that will become the current state at the end of this function
1847  *
1848  * Applies the given context to the hardware and copies it into the current context.
1849  * It's up to the user to release the src context afterwards.
1850  *
1851  * Return: an enum dc_status result code for the operation
1852  */
1853 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1854 {
1855         struct dc_bios *dcb = dc->ctx->dc_bios;
1856         enum dc_status result = DC_ERROR_UNEXPECTED;
1857         struct pipe_ctx *pipe;
1858         int i, k, l;
1859         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1860         struct dc_state *old_state;
1861         bool subvp_prev_use = false;
1862
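        /*
         * Registers are about to be programmed: leave Z10 and disallow idle
         * optimizations so the hardware stays reachable for the whole commit.
         */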
1863         dc_z10_restore(dc);
1864         dc_allow_idle_optimizations(dc, false);
1865
1866         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1867                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1868
1869                 /* Check old context for SubVP */
1870                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
1871                 if (subvp_prev_use)
1872                         break;
1873         }
1874
1875         for (i = 0; i < context->stream_count; i++)
1876                 dc_streams[i] =  context->streams[i];
1877
1878         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1879                 disable_vbios_mode_if_required(dc, context);
1880                 dc->hwss.enable_accelerated_mode(dc, context);
1881         }
1882
1883         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1884                 context->stream_count == 0)
1885                 dc->hwss.prepare_bandwidth(dc, context);
1886
1887         /* When SubVP is active, all HW programming must be done while
1888          * SubVP lock is acquired
1889          */
1890         if (dc->hwss.subvp_pipe_control_lock)
1891                 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1892
1893         if (dc->hwss.update_dsc_pg)
1894                 dc->hwss.update_dsc_pg(dc, context, false);
1895
1896         disable_dangling_plane(dc, context);
1897         /* re-program planes for existing stream, in case we need to
1898          * free up plane resource for later use
1899          */
1900         if (dc->hwss.apply_ctx_for_surface) {
1901                 for (i = 0; i < context->stream_count; i++) {
1902                         if (context->streams[i]->mode_changed)
1903                                 continue;
1904                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1905                         dc->hwss.apply_ctx_for_surface(
1906                                 dc, context->streams[i],
1907                                 context->stream_status[i].plane_count,
1908                                 context); /* use new pipe config in new context */
1909                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1910                         dc->hwss.post_unlock_program_front_end(dc, context);
1911                 }
1912         }
1913
1914         /* Program hardware */
1915         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1916                 pipe = &context->res_ctx.pipe_ctx[i];
1917                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1918         }
1919
1920         result = dc->hwss.apply_ctx_to_hw(dc, context);
1921
1922         if (result != DC_OK) {
1923                 /* Application of dc_state to hardware stopped. */
1924                 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1925                 return result;
1926         }
1927
1928         dc_trigger_sync(dc, context);
1929
1930         /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
1931         for (i = 0; i < context->stream_count; i++) {
1932                 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
1933
1934                 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
1935                 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
1936         }
1937
1938         /* Program all planes within new context*/
1939         if (dc->hwss.program_front_end_for_ctx) {
1940                 dc->hwss.interdependent_update_lock(dc, context, true);
1941                 dc->hwss.program_front_end_for_ctx(dc, context);
1942                 dc->hwss.interdependent_update_lock(dc, context, false);
1943                 dc->hwss.post_unlock_program_front_end(dc, context);
1944         }
1945
1946         if (dc->hwss.commit_subvp_config)
1947                 dc->hwss.commit_subvp_config(dc, context);
1948         if (dc->hwss.subvp_pipe_control_lock)
1949                 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1950
1951         for (i = 0; i < context->stream_count; i++) {
1952                 const struct dc_link *link = context->streams[i]->link;
1953
1954                 if (!context->streams[i]->mode_changed)
1955                         continue;
1956
1957                 if (dc->hwss.apply_ctx_for_surface) {
1958                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1959                         dc->hwss.apply_ctx_for_surface(
1960                                         dc, context->streams[i],
1961                                         context->stream_status[i].plane_count,
1962                                         context);
1963                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1964                         dc->hwss.post_unlock_program_front_end(dc, context);
1965                 }
1966
1967                 /*
1968                  * enable stereo
1969                  * TODO rework dc_enable_stereo call to work with validation sets?
1970                  */
1971                 for (k = 0; k < MAX_PIPES; k++) {
1972                         pipe = &context->res_ctx.pipe_ctx[k];
1973
1974                         for (l = 0; pipe && l < context->stream_count; l++) {
1975                                 if (context->streams[l] &&
1976                                         context->streams[l] == pipe->stream &&
1977                                         dc->hwss.setup_stereo)
1978                                         dc->hwss.setup_stereo(pipe, dc);
1979                         }
1980                 }
1981
1982                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1983                                 context->streams[i]->timing.h_addressable,
1984                                 context->streams[i]->timing.v_addressable,
1985                                 context->streams[i]->timing.h_total,
1986                                 context->streams[i]->timing.v_total,
1987                                 context->streams[i]->timing.pix_clk_100hz / 10);
1988         }
1989
1990         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1991
1992         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1993                 context->stream_count == 0) {
1994                 /* Must wait for no flips to be pending before doing optimize bw */
1995                 wait_for_no_pipes_pending(dc, context);
1996                 /* pplib is notified if disp_num changed */
1997                 dc->hwss.optimize_bandwidth(dc, context);
1998                 /* Need to do otg sync again as otg could be out of sync due to otg
1999                  * workaround applied during clock update
2000                  */
2001                 dc_trigger_sync(dc, context);
2002         }
2003
2004         if (dc->hwss.update_dsc_pg)
2005                 dc->hwss.update_dsc_pg(dc, context, true);
2006
2007         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2008                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2009         else
2010                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2011
2012         context->stream_mask = get_stream_mask(dc, context);
2013
2014         if (context->stream_mask != dc->current_state->stream_mask)
2015                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2016
2017         for (i = 0; i < context->stream_count; i++)
2018                 context->streams[i]->mode_changed = false;
2019
2020         /* Clear update flags that were set earlier to avoid redundant programming */
2021         for (i = 0; i < context->stream_count; i++) {
2022                 context->streams[i]->update_flags.raw = 0x0;
2023         }
2024
2025         old_state = dc->current_state;
2026         dc->current_state = context;
2027
2028         dc_state_release(old_state);
2029
2030         dc_state_retain(dc->current_state);
2031
2032         return result;
2033 }
2034
2035 static bool commit_minimal_transition_state_legacy(struct dc *dc,
2036                 struct dc_state *transition_base_context);
2037
2038 /**
2039  * dc_commit_streams - Commit current stream state
2040  *
2041  * @dc: DC object with the commit state to be configured in the hardware
2042  * @streams: Array with a list of stream state
2043  * @stream_count: Total of streams
2044  *
2045  * Function responsible for committing stream changes to the hardware.
2046  *
2047  * Return:
2048  * DC_OK if everything works as expected; otherwise, a dc_status error
2049  * code.
2050  */
2051 enum dc_status dc_commit_streams(struct dc *dc,
2052                                  struct dc_stream_state *streams[],
2053                                  uint8_t stream_count)
2054 {
2055         int i, j;
2056         struct dc_state *context;
2057         enum dc_status res = DC_OK;
2058         struct dc_validation_set set[MAX_STREAMS] = {0};
2059         struct pipe_ctx *pipe;
2060         bool handle_exit_odm2to1 = false;
2061
2062         if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2063                 return res;
2064
2065         if (!streams_changed(dc, streams, stream_count))
2066                 return res;
2067
2068         dc_exit_ips_for_hw_access(dc);
2069
2070         DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2071
2072         for (i = 0; i < stream_count; i++) {
2073                 struct dc_stream_state *stream = streams[i];
2074                 struct dc_stream_status *status = dc_stream_get_status(stream);
2075
2076                 dc_stream_log(dc, stream);
2077
2078                 set[i].stream = stream;
2079
2080                 if (status) {
2081                         set[i].plane_count = status->plane_count;
2082                         for (j = 0; j < status->plane_count; j++)
2083                                 set[i].plane_states[j] = status->plane_states[j];
2084                 }
2085         }
2086
2087         /* ODM Combine 2:1 power optimization is only applied in the single-stream
2088          * scenario; it uses more pipes than strictly needed to reduce power
2089          * consumption. We need to switch off this feature to make room for new streams.
2090          */
2091         if (stream_count > dc->current_state->stream_count &&
2092                         dc->current_state->stream_count == 1) {
2093                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2094                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2095                         if (pipe->next_odm_pipe)
2096                                 handle_exit_odm2to1 = true;
2097                 }
2098         }
2099
2100         if (handle_exit_odm2to1)
2101                 res = commit_minimal_transition_state_legacy(dc, dc->current_state);
2102
2103         context = dc_state_create_current_copy(dc);
2104         if (!context)
2105                 goto context_alloc_fail;
2106
2107         res = dc_validate_with_context(dc, set, stream_count, context, false);
2108         if (res != DC_OK) {
2109                 BREAK_TO_DEBUGGER();
2110                 goto fail;
2111         }
2112
2113         res = dc_commit_state_no_check(dc, context);
2114
2115         for (i = 0; i < stream_count; i++) {
2116                 for (j = 0; j < context->stream_count; j++) {
2117                         if (streams[i]->stream_id == context->streams[j]->stream_id)
2118                                 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2119
2120                         if (dc_is_embedded_signal(streams[i]->signal)) {
2121                                 struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
2122
2123                                 if (dc->hwss.is_abm_supported)
2124                                         status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2125                                 else
2126                                         status->is_abm_supported = true;
2127                         }
2128                 }
2129         }
2130
2131 fail:
2132         dc_state_release(context);
2133
2134 context_alloc_fail:
2135
2136         DC_LOG_DC("%s Finished.\n", __func__);
2137
2138         return res;
2139 }
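
/*
 * Illustrative call from a DM (a sketch, not lifted from any real DM; the
 * hypothetical build_streams() stands in for deriving the stream array from
 * the DM's own state):
 *
 *      struct dc_stream_state *streams[MAX_STREAMS];
 *      uint8_t count = build_streams(streams);
 *
 *      if (dc_commit_streams(dc, streams, count) != DC_OK)
 *              ...log the failure and roll the DM state back...
 */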
2140
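/*
 * Acquire (@acquire == true) or release a post-blend MPC 3D LUT and shaper
 * for @stream through the resource pool hooks; returns true on success. On
 * release the pipe index is irrelevant, so no stream lookup is performed.
 */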
2141 bool dc_acquire_release_mpc_3dlut(
2142                 struct dc *dc, bool acquire,
2143                 struct dc_stream_state *stream,
2144                 struct dc_3dlut **lut,
2145                 struct dc_transfer_func **shaper)
2146 {
2147         int pipe_idx;
2148         bool ret = false;
2149         bool found_pipe_idx = false;
2150         const struct resource_pool *pool = dc->res_pool;
2151         struct resource_context *res_ctx = &dc->current_state->res_ctx;
2152         int mpcc_id = 0;
2153
2154         if (pool && res_ctx) {
2155                 if (acquire) {
2156                         /* find pipe idx for the given stream */
2157                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2158                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2159                                         found_pipe_idx = true;
2160                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2161                                         break;
2162                                 }
2163                         }
2164                 } else
2165                         found_pipe_idx = true; /* for release, pipe_idx is not required */
2166
2167                 if (found_pipe_idx) {
2168                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2169                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2170                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2171                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2172                 }
2173         }
2174         return ret;
2175 }
2176
2177 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2178 {
2179         int i;
2180         struct pipe_ctx *pipe;
2181
2182         for (i = 0; i < MAX_PIPES; i++) {
2183                 pipe = &context->res_ctx.pipe_ctx[i];
2184
2185                 // Don't check flip pending on phantom pipes
2186                 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2187                         continue;
2188
2189                 /* Must set to false to start with, due to OR in update function */
2190                 pipe->plane_state->status.is_flip_pending = false;
2191                 dc->hwss.update_pending_status(pipe);
2192                 if (pipe->plane_state->status.is_flip_pending)
2193                         return true;
2194         }
2195         return false;
2196 }
2197
2198 /* Perform updates here which need to be deferred until the next vupdate.
2199  *
2200  * For example, the blend LUT, 3D LUT, and shaper LUT bypass registers are
2201  * double buffered, but forcing LUT memory into the shutdown state takes
2202  * effect immediately. This causes single-frame corruption, as the LUT is
2203  * disabled mid-frame, unless shutdown is deferred until after entering bypass.
2204  */
2205 static void process_deferred_updates(struct dc *dc)
2206 {
2207         int i = 0;
2208
2209         if (dc->debug.enable_mem_low_power.bits.cm) {
2210                 ASSERT(dc->dcn_ip->max_num_dpp);
2211                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2212                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2213                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2214         }
2215 }
2216
2217 void dc_post_update_surfaces_to_stream(struct dc *dc)
2218 {
2219         int i;
2220         struct dc_state *context = dc->current_state;
2221
2222         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2223                 return;
2224
2225         post_surface_trace(dc);
2226
2227         /*
2228          * Only relevant for DCN behavior where we can guarantee the optimization
2229          * is safe to apply - retain the legacy behavior for DCE.
2230          */
2231
2232         if (dc->ctx->dce_version < DCE_VERSION_MAX)
2233                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2234         else {
2235                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2236
2237                 if (is_flip_pending_in_pipes(dc, context))
2238                         return;
2239
2240                 for (i = 0; i < dc->res_pool->pipe_count; i++)
2241                         if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2242                                         context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2243                                 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2244                                 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2245                         }
2246
2247                 process_deferred_updates(dc);
2248
2249                 dc->hwss.optimize_bandwidth(dc, context);
2250
2251                 if (dc->hwss.update_dsc_pg)
2252                         dc->hwss.update_dsc_pg(dc, context, true);
2253         }
2254
2255         dc->optimized_required = false;
2256         dc->wm_optimized_required = false;
2257 }
2258
2259 bool dc_set_generic_gpio_for_stereo(bool enable,
2260                 struct gpio_service *gpio_service)
2261 {
2262         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2263         struct gpio_pin_info pin_info;
2264         struct gpio *generic;
2265         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2266                            GFP_KERNEL);
2267
2268         if (!config)
2269                 return false;
2270         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2271
2272         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2273                 kfree(config);
2274                 return false;
2275         } else {
2276                 generic = dal_gpio_service_create_generic_mux(
2277                         gpio_service,
2278                         pin_info.offset,
2279                         pin_info.mask);
2280         }
2281
2282         if (!generic) {
2283                 kfree(config);
2284                 return false;
2285         }
2286
2287         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2288
2289         config->enable_output_from_mux = enable;
2290         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2291
2292         if (gpio_result == GPIO_RESULT_OK)
2293                 gpio_result = dal_mux_setup_config(generic, config);
2294
2295         /* cleanup is identical on both paths; only the return value differs */
2296         dal_gpio_close(generic);
2297         dal_gpio_destroy_generic_mux(&generic);
2298         kfree(config);
2299         return gpio_result == GPIO_RESULT_OK;
2306 }
2307
2308 static bool is_surface_in_context(
2309                 const struct dc_state *context,
2310                 const struct dc_plane_state *plane_state)
2311 {
2312         int j;
2313
2314         for (j = 0; j < MAX_PIPES; j++) {
2315                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2316
2317                 if (plane_state == pipe_ctx->plane_state) {
2318                         return true;
2319                 }
2320         }
2321
2322         return false;
2323 }
2324
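/*
 * Update types form an ordered severity scale (FAST < MED < FULL);
 * elevate_update_type() only ever raises the running type, so a single FULL
 * trigger dominates any number of FAST or MED ones.
 */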
2325 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2326 {
2327         union surface_update_flags *update_flags = &u->surface->update_flags;
2328         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2329
2330         if (!u->plane_info)
2331                 return UPDATE_TYPE_FAST;
2332
2333         if (u->plane_info->color_space != u->surface->color_space) {
2334                 update_flags->bits.color_space_change = 1;
2335                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2336         }
2337
2338         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2339                 update_flags->bits.horizontal_mirror_change = 1;
2340                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2341         }
2342
2343         if (u->plane_info->rotation != u->surface->rotation) {
2344                 update_flags->bits.rotation_change = 1;
2345                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2346         }
2347
2348         if (u->plane_info->format != u->surface->format) {
2349                 update_flags->bits.pixel_format_change = 1;
2350                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2351         }
2352
2353         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2354                 update_flags->bits.stereo_format_change = 1;
2355                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2356         }
2357
2358         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2359                 update_flags->bits.per_pixel_alpha_change = 1;
2360                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2361         }
2362
2363         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2364                 update_flags->bits.global_alpha_change = 1;
2365                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2366         }
2367
2368         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2369                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2370                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2371                 /* During DCC on/off, stutter period is calculated before
2372                  * DCC has fully transitioned. This results in incorrect
2373                  * stutter period calculation. Triggering a full update will
2374                  * recalculate stutter period.
2375                  */
2376                 update_flags->bits.dcc_change = 1;
2377                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2378         }
2379
2380         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2381                         resource_pixel_format_to_bpp(u->surface->format)) {
2382                 /* different bytes per element will require full bandwidth
2383                  * and DML calculation
2384                  */
2385                 update_flags->bits.bpp_change = 1;
2386                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2387         }
2388
2389         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2390                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2391                 update_flags->bits.plane_size_change = 1;
2392                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2393         }
2394
2396         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2397                         sizeof(union dc_tiling_info)) != 0) {
2398                 update_flags->bits.swizzle_change = 1;
2399                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2400
2401                 /* todo: below are HW dependent, we should add a hook to
2402                  * DCE/N resource and validated there.
2403                  */
2404                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2405                         /* swizzled mode requires RQ to be setup properly,
2406                          * thus need to run DML to calculate RQ settings
2407                          */
2408                         update_flags->bits.bandwidth_change = 1;
2409                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2410                 }
2411         }
2412
2413         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2414         return update_type;
2415 }
2416
2417 static enum surface_update_type get_scaling_info_update_type(
2418                 const struct dc *dc,
2419                 const struct dc_surface_update *u)
2420 {
2421         union surface_update_flags *update_flags = &u->surface->update_flags;
2422
2423         if (!u->scaling_info)
2424                 return UPDATE_TYPE_FAST;
2425
2426         if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2427                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2428                         || u->scaling_info->scaling_quality.integer_scaling !=
2429                                 u->surface->scaling_quality.integer_scaling
2430                         ) {
2431                 update_flags->bits.scaling_change = 1;
2432
2433                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2434                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2435                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2436                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2437                         /* Making dst rect smaller requires a bandwidth change */
2438                         update_flags->bits.bandwidth_change = 1;
2439         }
2440
2441         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2442                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2443
2444                 update_flags->bits.scaling_change = 1;
2445                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2446                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2447                         /* Making src rect bigger requires a bandwidth change */
2448                         update_flags->bits.clock_change = 1;
2449         }
2450
2451         if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2452                 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2453                  u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2454                  /* Changing clip size of a large surface may result in MPC slice count change */
2455                 update_flags->bits.bandwidth_change = 1;
2456
2457         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
2458                         u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
2459                 update_flags->bits.clip_size_change = 1;
2460
2461         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2462                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2463                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2464                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2465                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2466                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2467                 update_flags->bits.position_change = 1;
2468
2469         if (update_flags->bits.clock_change
2470                         || update_flags->bits.bandwidth_change
2471                         || update_flags->bits.scaling_change)
2472                 return UPDATE_TYPE_FULL;
2473
2474         if (update_flags->bits.position_change ||
2475                         update_flags->bits.clip_size_change)
2476                 return UPDATE_TYPE_MED;
2477
2478         return UPDATE_TYPE_FAST;
2479 }
2480
2481 static enum surface_update_type det_surface_update(const struct dc *dc,
2482                 const struct dc_surface_update *u)
2483 {
2484         const struct dc_state *context = dc->current_state;
2485         enum surface_update_type type;
2486         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2487         union surface_update_flags *update_flags = &u->surface->update_flags;
2488
2489         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2490                 update_flags->raw = 0xFFFFFFFF;
2491                 return UPDATE_TYPE_FULL;
2492         }
2493
2494         update_flags->raw = 0; // Reset all flags
2495
2496         type = get_plane_info_update_type(u);
2497         elevate_update_type(&overall_type, type);
2498
2499         type = get_scaling_info_update_type(dc, u);
2500         elevate_update_type(&overall_type, type);
2501
2502         if (u->flip_addr) {
2503                 update_flags->bits.addr_update = 1;
2504                 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2505                         update_flags->bits.tmz_changed = 1;
2506                         elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2507                 }
2508         }
2509         if (u->in_transfer_func)
2510                 update_flags->bits.in_transfer_func_change = 1;
2511
2512         if (u->input_csc_color_matrix)
2513                 update_flags->bits.input_csc_change = 1;
2514
2515         if (u->coeff_reduction_factor)
2516                 update_flags->bits.coeff_reduction_change = 1;
2517
2518         if (u->gamut_remap_matrix)
2519                 update_flags->bits.gamut_remap_change = 1;
2520
2521         if (u->blend_tf)
2522                 update_flags->bits.gamma_change = 1;
2523
2524         if (u->gamma) {
2525                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2526
2527                 if (u->plane_info)
2528                         format = u->plane_info->format;
2529                 else if (u->surface)
2530                         format = u->surface->format;
2531
2532                 if (dce_use_lut(format))
2533                         update_flags->bits.gamma_change = 1;
2534         }
2535
2536         if (u->lut3d_func || u->func_shaper)
2537                 update_flags->bits.lut_3d = 1;
2538
2539         if (u->hdr_mult.value &&
2540                         u->hdr_mult.value != u->surface->hdr_mult.value) {
2541                 update_flags->bits.hdr_mult = 1;
2542                 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2543         }
2544
2545         if (update_flags->bits.in_transfer_func_change) {
2546                 type = UPDATE_TYPE_MED;
2547                 elevate_update_type(&overall_type, type);
2548         }
2549
2550         if (update_flags->bits.lut_3d) {
2551                 type = UPDATE_TYPE_FULL;
2552                 elevate_update_type(&overall_type, type);
2553         }
2554
2555         if (dc->debug.enable_legacy_fast_update &&
2556                         (update_flags->bits.gamma_change ||
2557                         update_flags->bits.gamut_remap_change ||
2558                         update_flags->bits.input_csc_change ||
2559                         update_flags->bits.coeff_reduction_change)) {
2560                 type = UPDATE_TYPE_FULL;
2561                 elevate_update_type(&overall_type, type);
2562         }
2563         return overall_type;
2564 }
2565
2566 static enum surface_update_type check_update_surfaces_for_stream(
2567                 struct dc *dc,
2568                 struct dc_surface_update *updates,
2569                 int surface_count,
2570                 struct dc_stream_update *stream_update,
2571                 const struct dc_stream_status *stream_status)
2572 {
2573         int i;
2574         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2575
2576         if (dc->idle_optimizations_allowed)
2577                 overall_type = UPDATE_TYPE_FULL;
2578
2579         if (stream_status == NULL || stream_status->plane_count != surface_count)
2580                 overall_type = UPDATE_TYPE_FULL;
2581
2582         if (stream_update && stream_update->pending_test_pattern)
2583                 overall_type = UPDATE_TYPE_FULL;
2585
2586         /* some stream updates require passive update */
2587         if (stream_update) {
2588                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2589
2590                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2591                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2592                         stream_update->integer_scaling_update)
2593                         su_flags->bits.scaling = 1;
2594
2595                 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2596                         su_flags->bits.out_tf = 1;
2597
2598                 if (stream_update->abm_level)
2599                         su_flags->bits.abm_level = 1;
2600
2601                 if (stream_update->dpms_off)
2602                         su_flags->bits.dpms_off = 1;
2603
2604                 if (stream_update->gamut_remap)
2605                         su_flags->bits.gamut_remap = 1;
2606
2607                 if (stream_update->wb_update)
2608                         su_flags->bits.wb_update = 1;
2609
2610                 if (stream_update->dsc_config)
2611                         su_flags->bits.dsc_changed = 1;
2612
2613                 if (stream_update->mst_bw_update)
2614                         su_flags->bits.mst_bw = 1;
2615
2616                 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2617                         (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2618                                 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2619                         su_flags->bits.fams_changed = 1;
2620
2621                 if (su_flags->raw != 0)
2622                         overall_type = UPDATE_TYPE_FULL;
2623
2624                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2625                         su_flags->bits.out_csc = 1;
2626
2627                 /* Output transfer function changes do not require bandwidth recalculation,
2628                  * so don't trigger a full update
2629                  */
2630                 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2631                         su_flags->bits.out_tf = 1;
2632         }
2633
2634         for (i = 0; i < surface_count; i++) {
2635                 enum surface_update_type type =
2636                                 det_surface_update(dc, &updates[i]);
2637
2638                 elevate_update_type(&overall_type, type);
2639         }
2640
2641         return overall_type;
2642 }
2643
2644 /*
2645  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2646  *
2647  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2648  */
2649 enum surface_update_type dc_check_update_surfaces_for_stream(
2650                 struct dc *dc,
2651                 struct dc_surface_update *updates,
2652                 int surface_count,
2653                 struct dc_stream_update *stream_update,
2654                 const struct dc_stream_status *stream_status)
2655 {
2656         int i;
2657         enum surface_update_type type;
2658
2659         if (stream_update)
2660                 stream_update->stream->update_flags.raw = 0;
2661         for (i = 0; i < surface_count; i++)
2662                 updates[i].surface->update_flags.raw = 0;
2663
2664         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2665         if (type == UPDATE_TYPE_FULL) {
2666                 if (stream_update) {
2667                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2668                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2669                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2670                 }
2671                 for (i = 0; i < surface_count; i++)
2672                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2673         }
2674
2675         if (type == UPDATE_TYPE_FAST) {
2676                 // If there's an available clock comparator, we use that.
2677                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2678                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2679                                 dc->optimized_required = true;
2680                 // Else we fallback to mem compare.
2681                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2682                         dc->optimized_required = true;
2683                 }
2684
2685                 dc->optimized_required |= dc->wm_optimized_required;
2686         }
2687
2688         return type;
2689 }
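
/*
 * Example (illustrative sketch, not in-tree code): a DM-layer caller would
 * typically classify an update before deciding how to commit it, e.g.:
 *
 *      enum surface_update_type type;
 *
 *      type = dc_check_update_surfaces_for_stream(dc, updates, surface_count,
 *                                                 stream_update, stream_status);
 *      if (type == UPDATE_TYPE_FULL)
 *              ; // revalidate bandwidth and reprogram the full pipeline
 *      else
 *              ; // fast/medium path reusing the current configuration
 */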
2690
2691 static struct dc_stream_status *stream_get_status(
2692         struct dc_state *ctx,
2693         struct dc_stream_state *stream)
2694 {
2695         uint8_t i;
2696
2697         for (i = 0; i < ctx->stream_count; i++) {
2698                 if (stream == ctx->streams[i]) {
2699                         return &ctx->stream_status[i];
2700                 }
2701         }
2702
2703         return NULL;
2704 }
2705
2706 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2707
2708 static void copy_surface_update_to_plane(
2709                 struct dc_plane_state *surface,
2710                 struct dc_surface_update *srf_update)
2711 {
2712         if (srf_update->flip_addr) {
2713                 surface->address = srf_update->flip_addr->address;
2714                 surface->flip_immediate =
2715                         srf_update->flip_addr->flip_immediate;
2716                 surface->time.time_elapsed_in_us[surface->time.index] =
2717                         srf_update->flip_addr->flip_timestamp_in_us -
2718                                 surface->time.prev_update_time_in_us;
2719                 surface->time.prev_update_time_in_us =
2720                         srf_update->flip_addr->flip_timestamp_in_us;
2721                 surface->time.index++;
2722                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2723                         surface->time.index = 0;
2724
2725                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2726         }
2727
2728         if (srf_update->scaling_info) {
2729                 surface->scaling_quality =
2730                                 srf_update->scaling_info->scaling_quality;
2731                 surface->dst_rect =
2732                                 srf_update->scaling_info->dst_rect;
2733                 surface->src_rect =
2734                                 srf_update->scaling_info->src_rect;
2735                 surface->clip_rect =
2736                                 srf_update->scaling_info->clip_rect;
2737         }
2738
2739         if (srf_update->plane_info) {
2740                 surface->color_space =
2741                                 srf_update->plane_info->color_space;
2742                 surface->format =
2743                                 srf_update->plane_info->format;
2744                 surface->plane_size =
2745                                 srf_update->plane_info->plane_size;
2746                 surface->rotation =
2747                                 srf_update->plane_info->rotation;
2748                 surface->horizontal_mirror =
2749                                 srf_update->plane_info->horizontal_mirror;
2750                 surface->stereo_format =
2751                                 srf_update->plane_info->stereo_format;
2752                 surface->tiling_info =
2753                                 srf_update->plane_info->tiling_info;
2754                 surface->visible =
2755                                 srf_update->plane_info->visible;
2756                 surface->per_pixel_alpha =
2757                                 srf_update->plane_info->per_pixel_alpha;
2758                 surface->global_alpha =
2759                                 srf_update->plane_info->global_alpha;
2760                 surface->global_alpha_value =
2761                                 srf_update->plane_info->global_alpha_value;
2762                 surface->dcc =
2763                                 srf_update->plane_info->dcc;
2764                 surface->layer_index =
2765                                 srf_update->plane_info->layer_index;
2766         }
2767
2768         if (srf_update->gamma &&
2769                         (surface->gamma_correction !=
2770                                         srf_update->gamma)) {
2771                 memcpy(&surface->gamma_correction->entries,
2772                         &srf_update->gamma->entries,
2773                         sizeof(struct dc_gamma_entries));
2774                 surface->gamma_correction->is_identity =
2775                         srf_update->gamma->is_identity;
2776                 surface->gamma_correction->num_entries =
2777                         srf_update->gamma->num_entries;
2778                 surface->gamma_correction->type =
2779                         srf_update->gamma->type;
2780         }
2781
2782         if (srf_update->in_transfer_func &&
2783                         (surface->in_transfer_func !=
2784                                 srf_update->in_transfer_func)) {
2785                 surface->in_transfer_func->sdr_ref_white_level =
2786                         srf_update->in_transfer_func->sdr_ref_white_level;
2787                 surface->in_transfer_func->tf =
2788                         srf_update->in_transfer_func->tf;
2789                 surface->in_transfer_func->type =
2790                         srf_update->in_transfer_func->type;
2791                 memcpy(&surface->in_transfer_func->tf_pts,
2792                         &srf_update->in_transfer_func->tf_pts,
2793                         sizeof(struct dc_transfer_func_distributed_points));
2794         }
2795
2796         if (srf_update->func_shaper &&
2797                         (surface->in_shaper_func !=
2798                         srf_update->func_shaper))
2799                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2800                 sizeof(*surface->in_shaper_func));
2801
2802         if (srf_update->lut3d_func &&
2803                         (surface->lut3d_func !=
2804                         srf_update->lut3d_func))
2805                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2806                 sizeof(*surface->lut3d_func));
2807
2808         if (srf_update->hdr_mult.value)
2809                 surface->hdr_mult =
2810                                 srf_update->hdr_mult;
2811
2812         if (srf_update->blend_tf &&
2813                         (surface->blend_tf !=
2814                         srf_update->blend_tf))
2815                 memcpy(surface->blend_tf, srf_update->blend_tf,
2816                 sizeof(*surface->blend_tf));
2817
2818         if (srf_update->input_csc_color_matrix)
2819                 surface->input_csc_color_matrix =
2820                         *srf_update->input_csc_color_matrix;
2821
2822         if (srf_update->coeff_reduction_factor)
2823                 surface->coeff_reduction_factor =
2824                         *srf_update->coeff_reduction_factor;
2825
2826         if (srf_update->gamut_remap_matrix)
2827                 surface->gamut_remap_matrix =
2828                         *srf_update->gamut_remap_matrix;
2829 }
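
/*
 * Example (illustrative sketch, not in-tree code): only non-NULL members of a
 * dc_surface_update are applied above, so a plain page flip would populate
 * just the surface and flip_addr fields:
 *
 *      struct dc_flip_addrs flip = { 0 };
 *      struct dc_surface_update update = { 0 };
 *
 *      flip.address = new_address;         // hypothetical new scanout address
 *      flip.flip_immediate = false;
 *      update.surface = plane_state;       // plane being flipped
 *      update.flip_addr = &flip;
 *      // every other pointer stays NULL, so nothing else is touched
 */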
2830
2831 static void copy_stream_update_to_stream(struct dc *dc,
2832                                          struct dc_state *context,
2833                                          struct dc_stream_state *stream,
2834                                          struct dc_stream_update *update)
2835 {
2836         struct dc_context *dc_ctx = dc->ctx;
2837
2838         if (update == NULL || stream == NULL)
2839                 return;
2840
2841         if (update->src.height && update->src.width)
2842                 stream->src = update->src;
2843
2844         if (update->dst.height && update->dst.width)
2845                 stream->dst = update->dst;
2846
2847         if (update->out_transfer_func &&
2848             stream->out_transfer_func != update->out_transfer_func) {
2849                 stream->out_transfer_func->sdr_ref_white_level =
2850                         update->out_transfer_func->sdr_ref_white_level;
2851                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2852                 stream->out_transfer_func->type =
2853                         update->out_transfer_func->type;
2854                 memcpy(&stream->out_transfer_func->tf_pts,
2855                        &update->out_transfer_func->tf_pts,
2856                        sizeof(struct dc_transfer_func_distributed_points));
2857         }
2858
2859         if (update->hdr_static_metadata)
2860                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2861
2862         if (update->abm_level)
2863                 stream->abm_level = *update->abm_level;
2864
2865         if (update->periodic_interrupt)
2866                 stream->periodic_interrupt = *update->periodic_interrupt;
2867
2868         if (update->gamut_remap)
2869                 stream->gamut_remap_matrix = *update->gamut_remap;
2870
2871         /* Note: updating this after mode set is currently not a use case;
2872          * however, if it arises, OCSC would need to be reprogrammed at a
2873          * minimum.
2874          */
2875         if (update->output_color_space)
2876                 stream->output_color_space = *update->output_color_space;
2877
2878         if (update->output_csc_transform)
2879                 stream->csc_color_matrix = *update->output_csc_transform;
2880
2881         if (update->vrr_infopacket)
2882                 stream->vrr_infopacket = *update->vrr_infopacket;
2883
2884         if (update->allow_freesync)
2885                 stream->allow_freesync = *update->allow_freesync;
2886
2887         if (update->vrr_active_variable)
2888                 stream->vrr_active_variable = *update->vrr_active_variable;
2889
2890         if (update->vrr_active_fixed)
2891                 stream->vrr_active_fixed = *update->vrr_active_fixed;
2892
2893         if (update->crtc_timing_adjust)
2894                 stream->adjust = *update->crtc_timing_adjust;
2895
2896         if (update->dpms_off)
2897                 stream->dpms_off = *update->dpms_off;
2898
2899         if (update->hfvsif_infopacket)
2900                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2901
2902         if (update->vtem_infopacket)
2903                 stream->vtem_infopacket = *update->vtem_infopacket;
2904
2905         if (update->vsc_infopacket)
2906                 stream->vsc_infopacket = *update->vsc_infopacket;
2907
2908         if (update->vsp_infopacket)
2909                 stream->vsp_infopacket = *update->vsp_infopacket;
2910
2911         if (update->adaptive_sync_infopacket)
2912                 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2913
2914         if (update->dither_option)
2915                 stream->dither_option = *update->dither_option;
2916
2917         if (update->pending_test_pattern)
2918                 stream->test_pattern = *update->pending_test_pattern;
2919         /* update current stream with writeback info */
2920         if (update->wb_update) {
2921                 int i;
2922
2923                 stream->num_wb_info = update->wb_update->num_wb_info;
2924                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2925                 for (i = 0; i < stream->num_wb_info; i++)
2926                         stream->writeback_info[i] =
2927                                 update->wb_update->writeback_info[i];
2928         }
2929         if (update->dsc_config) {
2930                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2931                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2932                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2933                                        update->dsc_config->num_slices_v != 0);
2934
2935                 /* Use a temporary context for validating the new DSC config */
2936                 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
2937
2938                 if (dsc_validate_context) {
2939                         stream->timing.dsc_cfg = *update->dsc_config;
2940                         stream->timing.flags.DSC = enable_dsc;
2941                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2942                                 stream->timing.dsc_cfg = old_dsc_cfg;
2943                                 stream->timing.flags.DSC = old_dsc_enabled;
2944                                 update->dsc_config = NULL;
2945                         }
2946
2947                         dc_state_release(dsc_validate_context);
2948                 } else {
2949                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
2950                         update->dsc_config = NULL;
2951                 }
2952         }
2953 }
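
/*
 * The DSC branch above uses a pattern common in DC: apply the change to the
 * stream, validate it against a throwaway copy of the current state, and roll
 * back if validation fails. An illustrative sketch (not in-tree code):
 *
 *      struct dc_state *tmp = dc_state_create_copy(dc->current_state);
 *
 *      if (tmp) {
 *              apply_change(stream);           // hypothetical mutation
 *              if (!dc->res_pool->funcs->validate_bandwidth(dc, tmp, true))
 *                      revert_change(stream);  // hypothetical rollback
 *              dc_state_release(tmp);
 *      }
 */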
2954
2955 static void backup_planes_and_stream_state(
2956                 struct dc_scratch_space *scratch,
2957                 struct dc_stream_state *stream)
2958 {
2959         int i;
2960         struct dc_stream_status *status = dc_stream_get_status(stream);
2961
2962         if (!status)
2963                 return;
2964
2965         for (i = 0; i < status->plane_count; i++) {
2966                 scratch->plane_states[i] = *status->plane_states[i];
2967                 scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
2968                 scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
2969                 scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
2970                 scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
2971                 scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
2972         }
2973         scratch->stream_state = *stream;
2974         scratch->out_transfer_func = *stream->out_transfer_func;
2975 }
2976
2977 static void restore_planes_and_stream_state(
2978                 struct dc_scratch_space *scratch,
2979                 struct dc_stream_state *stream)
2980 {
2981         int i;
2982         struct dc_stream_status *status = dc_stream_get_status(stream);
2983
2984         if (!status)
2985                 return;
2986
2987         for (i = 0; i < status->plane_count; i++) {
2988                 *status->plane_states[i] = scratch->plane_states[i];
2989                 *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
2990                 *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
2991                 *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
2992                 *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
2993                 *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
2994         }
2995         *stream = scratch->stream_state;
2996         *stream->out_transfer_func = scratch->out_transfer_func;
2997 }
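
/*
 * Together these helpers form a scratch-based transaction: plane and stream
 * state is snapshotted before a commit is attempted so that a failed update
 * can be unwound. An illustrative sketch (not in-tree code):
 *
 *      backup_planes_and_stream_state(&context->scratch, stream);
 *      if (!do_commit(dc, context))    // hypothetical commit step
 *              restore_planes_and_stream_state(&context->scratch, stream);
 */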
2998
2999 static bool update_planes_and_stream_state(struct dc *dc,
3000                 struct dc_surface_update *srf_updates, int surface_count,
3001                 struct dc_stream_state *stream,
3002                 struct dc_stream_update *stream_update,
3003                 enum surface_update_type *new_update_type,
3004                 struct dc_state **new_context)
3005 {
3006         struct dc_state *context;
3007         int i, j;
3008         enum surface_update_type update_type;
3009         const struct dc_stream_status *stream_status;
3010         struct dc_context *dc_ctx = dc->ctx;
3011
3012         stream_status = dc_stream_get_status(stream);
3013
3014         if (!stream_status) {
3015                 if (surface_count) /* Only an error condition if surface_count is non-zero */
3016                         ASSERT(false);
3017
3018                 return false; /* Cannot commit surface to stream that is not committed */
3019         }
3020
3021         context = dc->current_state;
3022         backup_planes_and_stream_state(&dc->current_state->scratch, stream);
3023         update_type = dc_check_update_surfaces_for_stream(
3024                         dc, srf_updates, surface_count, stream_update, stream_status);
3025
3026         /* update current stream with the new updates */
3027         copy_stream_update_to_stream(dc, context, stream, stream_update);
3028
3029         /* do not perform surface update if surface has invalid dimensions
3030          * (all zero) and no scaling_info is provided
3031          */
3032         if (surface_count > 0) {
3033                 for (i = 0; i < surface_count; i++) {
3034                         if ((srf_updates[i].surface->src_rect.width == 0 ||
3035                                  srf_updates[i].surface->src_rect.height == 0 ||
3036                                  srf_updates[i].surface->dst_rect.width == 0 ||
3037                                  srf_updates[i].surface->dst_rect.height == 0) &&
3038                                 (!srf_updates[i].scaling_info ||
3039                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
3040                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
3041                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
3042                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
3043                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
3044                                 return false;
3045                         }
3046                 }
3047         }
3048
3049         if (update_type >= update_surface_trace_level)
3050                 update_surface_trace(dc, srf_updates, surface_count);
3051
3052         for (i = 0; i < surface_count; i++)
3053                 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3054
3055         if (update_type >= UPDATE_TYPE_FULL) {
3056                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3057
3058                 for (i = 0; i < surface_count; i++)
3059                         new_planes[i] = srf_updates[i].surface;
3060
3061                 /* initialize scratch memory for building context */
3062                 context = dc_state_create_copy(dc->current_state);
3063                 if (context == NULL) {
3064                         DC_ERROR("Failed to allocate new validate context!\n");
3065                         return false;
3066                 }
3067
3068                 /* For each full update, remove all existing phantom pipes first.
3069                  * This ensures that we have enough pipes for newly added MPO planes.
3070                  */
3071                 dc_state_remove_phantom_streams_and_planes(dc, context);
3072                 dc_state_release_phantom_streams_and_planes(dc, context);
3073
3074                 /* remove old surfaces from context */
3075                 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3076
3077                         BREAK_TO_DEBUGGER();
3078                         goto fail;
3079                 }
3080
3081                 /* add surface to context */
3082                 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3083
3084                         BREAK_TO_DEBUGGER();
3085                         goto fail;
3086                 }
3087         }
3088
3089         /* save update parameters into surface */
3090         for (i = 0; i < surface_count; i++) {
3091                 struct dc_plane_state *surface = srf_updates[i].surface;
3092
3093                 if (update_type >= UPDATE_TYPE_MED) {
3094                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3095                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3096
3097                                 if (pipe_ctx->plane_state != surface)
3098                                         continue;
3099
3100                                 resource_build_scaling_params(pipe_ctx);
3101                         }
3102                 }
3103         }
3104
3105         if (update_type == UPDATE_TYPE_FULL) {
3106                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3107                         BREAK_TO_DEBUGGER();
3108                         goto fail;
3109                 }
3110
3111                 for (i = 0; i < context->stream_count; i++) {
3112                         struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
3113                                         context->streams[i]);
3114
3115                         if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
3116                                 resource_build_test_pattern_params(&context->res_ctx, otg_master);
3117                 }
3118         }
3119
3120         *new_context = context;
3121         *new_update_type = update_type;
3122         backup_planes_and_stream_state(&context->scratch, stream);
3123
3124         return true;
3125
3126 fail:
3127         dc_state_release(context);
3128
3129         return false;
3130
3131 }
3132
3133 static void commit_planes_do_stream_update(struct dc *dc,
3134                 struct dc_stream_state *stream,
3135                 struct dc_stream_update *stream_update,
3136                 enum surface_update_type update_type,
3137                 struct dc_state *context)
3138 {
3139         int j;
3140
3141         // Stream updates
3142         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3143                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3144
3145                 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3146
3147                         if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3148                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3149
3150                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3151                                         stream_update->vrr_infopacket ||
3152                                         stream_update->vsc_infopacket ||
3153                                         stream_update->vsp_infopacket ||
3154                                         stream_update->hfvsif_infopacket ||
3155                                         stream_update->adaptive_sync_infopacket ||
3156                                         stream_update->vtem_infopacket) {
3157                                 resource_build_info_frame(pipe_ctx);
3158                                 dc->hwss.update_info_frame(pipe_ctx);
3159
3160                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3161                                         dc->link_srv->dp_trace_source_sequence(
3162                                                         pipe_ctx->stream->link,
3163                                                         DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3164                         }
3165
3166                         if (stream_update->hdr_static_metadata &&
3167                                         stream->use_dynamic_meta &&
3168                                         dc->hwss.set_dmdata_attributes &&
3169                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3170                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3171
3172                         if (stream_update->gamut_remap)
3173                                 dc_stream_set_gamut_remap(dc, stream);
3174
3175                         if (stream_update->output_csc_transform)
3176                                 dc_stream_program_csc_matrix(dc, stream);
3177
3178                         if (stream_update->dither_option) {
3179                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3180                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3181                                                                         &pipe_ctx->stream->bit_depth_params);
3182                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3183                                                 &stream->bit_depth_params,
3184                                                 &stream->clamping);
3185                                 while (odm_pipe) {
3186                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3187                                                         &stream->bit_depth_params,
3188                                                         &stream->clamping);
3189                                         odm_pipe = odm_pipe->next_odm_pipe;
3190                                 }
3191                         }
3192
3193
3194                         /* Full front end update; skip the rest for fast updates */
3195                         if (update_type == UPDATE_TYPE_FAST)
3196                                 continue;
3197
3198                         if (stream_update->dsc_config)
3199                                 dc->link_srv->update_dsc_config(pipe_ctx);
3200
3201                         if (stream_update->mst_bw_update) {
3202                                 if (stream_update->mst_bw_update->is_increase)
3203                                         dc->link_srv->increase_mst_payload(pipe_ctx,
3204                                                         stream_update->mst_bw_update->mst_stream_bw);
3205                                 else
3206                                         dc->link_srv->reduce_mst_payload(pipe_ctx,
3207                                                         stream_update->mst_bw_update->mst_stream_bw);
3208                         }
3209
3210                         if (stream_update->pending_test_pattern) {
3211                                 dc_link_dp_set_test_pattern(stream->link,
3212                                         stream->test_pattern.type,
3213                                         stream->test_pattern.color_space,
3214                                         stream->test_pattern.p_link_settings,
3215                                         stream->test_pattern.p_custom_pattern,
3216                                         stream->test_pattern.cust_pattern_size);
3217                         }
3218
3219                         if (stream_update->dpms_off) {
3220                                 if (*stream_update->dpms_off) {
3221                                         dc->link_srv->set_dpms_off(pipe_ctx);
3222                                         /* for dpms, keep acquired resources*/
3223                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3224                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3225
3226                                         dc->optimized_required = true;
3227
3228                                 } else {
3229                                         if (get_seamless_boot_stream_count(context) == 0)
3230                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3231                                         dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3232                                 }
3233                         } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3234                                         && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3235                                 /*
3236                                  * Workaround for firmware issue in some receivers where they don't pick up
3237                                  * correct output color space unless DP link is disabled/re-enabled
3238                                  */
3239                                 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3240                         }
3241
3242                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3243                                 bool should_program_abm = true;
3244
3245                                 // if OTG funcs are defined, check whether blanked before programming
3246                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3247                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3248                                                 should_program_abm = false;
3249
3250                                 if (should_program_abm) {
3251                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3252                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3253                                         } else {
3254                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3255                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3256                                         }
3257                                 }
3258                         }
3259                 }
3260         }
3261 }
3262
3263 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3264 {
3265         if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3266                         || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3267                         && stream->ctx->dce_version >= DCN_VERSION_3_1)
3268                 return true;
3269
3270         if (stream->link->replay_settings.config.replay_supported)
3271                 return true;
3272
3273         return false;
3274 }
3275
3276 void dc_dmub_update_dirty_rect(struct dc *dc,
3277                                int surface_count,
3278                                struct dc_stream_state *stream,
3279                                struct dc_surface_update *srf_updates,
3280                                struct dc_state *context)
3281 {
3282         union dmub_rb_cmd cmd;
3283         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3284         unsigned int i, j;
3285         unsigned int panel_inst = 0;
3286
3287         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3288                 return;
3289
3290         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3291                 return;
3292
3293         memset(&cmd, 0x0, sizeof(cmd));
3294         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3295         cmd.update_dirty_rect.header.sub_type = 0;
3296         cmd.update_dirty_rect.header.payload_bytes =
3297                 sizeof(cmd.update_dirty_rect) -
3298                 sizeof(cmd.update_dirty_rect.header);
3299         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3300         for (i = 0; i < surface_count; i++) {
3301                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3302                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3303
3304                 if (!srf_updates[i].surface || !flip_addr)
3305                         continue;
3306                 /* Do not send in immediate flip mode */
3307                 if (srf_updates[i].surface->flip_immediate)
3308                         continue;
3309
3310                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3311                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3312                                 sizeof(flip_addr->dirty_rects));
3313                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3314                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3315
3316                         if (pipe_ctx->stream != stream)
3317                                 continue;
3318                         if (pipe_ctx->plane_state != plane_state)
3319                                 continue;
3320
3321                         update_dirty_rect->panel_inst = panel_inst;
3322                         update_dirty_rect->pipe_idx = j;
3323                         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3324                 }
3325         }
3326 }
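
/*
 * Example (illustrative sketch, not in-tree code): for the command above to
 * carry useful data, the DM layer records the damaged regions in each surface
 * update's flip address before committing, e.g. for one rectangle:
 *
 *      struct dc_flip_addrs *flip = srf_updates[i].flip_addr;
 *
 *      flip->dirty_rect_count = 1;
 *      flip->dirty_rects[0].x = damage_x;      // hypothetical damage coordinates
 *      flip->dirty_rects[0].y = damage_y;
 *      flip->dirty_rects[0].width = damage_w;
 *      flip->dirty_rects[0].height = damage_h;
 */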
3327
3328 static void build_dmub_update_dirty_rect(
3329                 struct dc *dc,
3330                 int surface_count,
3331                 struct dc_stream_state *stream,
3332                 struct dc_surface_update *srf_updates,
3333                 struct dc_state *context,
3334                 struct dc_dmub_cmd dc_dmub_cmd[],
3335                 unsigned int *dmub_cmd_count)
3336 {
3337         union dmub_rb_cmd cmd;
3338         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3339         unsigned int i, j;
3340         unsigned int panel_inst = 0;
3341
3342         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3343                 return;
3344
3345         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3346                 return;
3347
3348         memset(&cmd, 0x0, sizeof(cmd));
3349         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3350         cmd.update_dirty_rect.header.sub_type = 0;
3351         cmd.update_dirty_rect.header.payload_bytes =
3352                 sizeof(cmd.update_dirty_rect) -
3353                 sizeof(cmd.update_dirty_rect.header);
3354         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3355         for (i = 0; i < surface_count; i++) {
3356                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3357                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3358
3359                 if (!srf_updates[i].surface || !flip_addr)
3360                         continue;
3361                 /* Do not send in immediate flip mode */
3362                 if (srf_updates[i].surface->flip_immediate)
3363                         continue;
3364                 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3365                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3366                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3367                                 sizeof(flip_addr->dirty_rects));
3368                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3369                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3370
3371                         if (pipe_ctx->stream != stream)
3372                                 continue;
3373                         if (pipe_ctx->plane_state != plane_state)
3374                                 continue;
3375                         update_dirty_rect->panel_inst = panel_inst;
3376                         update_dirty_rect->pipe_idx = j;
3377                         dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3378                         dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3379                         (*dmub_cmd_count)++;
3380                 }
3381         }
3382 }
3383
3384
3385 /**
3386  * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3387  *
3388  * @dc: Current DC state
3389  * @srf_updates: Array of surface updates
3390  * @surface_count: Number of surfaces that have been updated
3391  * @stream: Corresponding stream to be updated in the current flip
3392  * @context: New DC state to be programmed
3393  *
3394  * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3395  * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3396  *
3397  * This function builds an array of DMCUB commands to be sent to DMCUB. Building the
3398  * array up front is required so that the commands can be sent while the OTG lock is acquired.
3399  *
3400  * Return: void
3401  */
3402 static void build_dmub_cmd_list(struct dc *dc,
3403                 struct dc_surface_update *srf_updates,
3404                 int surface_count,
3405                 struct dc_stream_state *stream,
3406                 struct dc_state *context,
3407                 struct dc_dmub_cmd dc_dmub_cmd[],
3408                 unsigned int *dmub_cmd_count)
3409 {
3410         // Initialize cmd count to 0
3411         *dmub_cmd_count = 0;
3412         build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3413 }
3414
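/*
 * Fast path for committing planes: the required programming is captured up
 * front as a DMCUB command list plus a hardware block sequence, which is then
 * executed in a single pass.
 */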
3415 static void commit_planes_for_stream_fast(struct dc *dc,
3416                 struct dc_surface_update *srf_updates,
3417                 int surface_count,
3418                 struct dc_stream_state *stream,
3419                 struct dc_stream_update *stream_update,
3420                 enum surface_update_type update_type,
3421                 struct dc_state *context)
3422 {
3423         int i, j;
3424         struct pipe_ctx *top_pipe_to_program = NULL;
3425         struct dc_stream_status *stream_status = NULL;
3426         dc_exit_ips_for_hw_access(dc);
3427
3428         dc_z10_restore(dc);
3429
3430         top_pipe_to_program = resource_get_otg_master_for_stream(
3431                         &context->res_ctx,
3432                         stream);
3433
3434         if (!top_pipe_to_program)
3435                 return;
3436
3437         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3438                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3439
3440                 if (pipe->stream && pipe->plane_state) {
3441                         set_p_state_switch_method(dc, context, pipe);
3442
3443                         if (dc->debug.visual_confirm)
3444                                 dc_update_visual_confirm_color(dc, context, pipe);
3445                 }
3446         }
3447
3448         for (i = 0; i < surface_count; i++) {
3449                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3450                 /* set logical flag for lock/unlock use */
3451                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3452                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3453
3454                         if (!pipe_ctx->plane_state)
3455                                 continue;
3456                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3457                                 continue;
3458                         pipe_ctx->plane_state->triplebuffer_flips = false;
3459                         if (update_type == UPDATE_TYPE_FAST &&
3460                             dc->hwss.program_triplebuffer &&
3461                             !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3462                                 /* triple buffer for VUpdate only */
3463                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3464                         }
3465                 }
3466         }
3467
3468         stream_status = dc_state_get_stream_status(context, stream);
3469
3470         build_dmub_cmd_list(dc,
3471                         srf_updates,
3472                         surface_count,
3473                         stream,
3474                         context,
3475                         context->dc_dmub_cmd,
3476                         &(context->dmub_cmd_count));
3477         hwss_build_fast_sequence(dc,
3478                         context->dc_dmub_cmd,
3479                         context->dmub_cmd_count,
3480                         context->block_sequence,
3481                         &(context->block_sequence_steps),
3482                         top_pipe_to_program,
3483                         stream_status);
3484         hwss_execute_sequence(dc,
3485                         context->block_sequence,
3486                         context->block_sequence_steps);
3487         /* Clear update flags so next flip doesn't have redundant programming
3488          * (if there's no stream update, the update flags are not cleared).
3489          * Surface updates are cleared unconditionally at the beginning of each flip,
3490          * so no need to clear here.
3491          */
3492         if (top_pipe_to_program->stream)
3493                 top_pipe_to_program->stream->update_flags.raw = 0;
3494 }
3495
3496 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
3497 {
3498         /*
3499          * This function calls HWSS to wait for any potentially double buffered
3500          * operations to complete. It should be invoked as a preamble prior to
3501          * full update programming, before asserting any HW locks.
3502          */
3503         int pipe_idx;
3504         int opp_inst;
3505         int opp_count = dc->res_pool->res_cap->num_opp;
3506         struct hubp *hubp;
3507         int mpcc_inst;
3508         const struct pipe_ctx *pipe_ctx;
3509
3510         for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3511                 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3512
3513                 if (!pipe_ctx->stream)
3514                         continue;
3515
3516                 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3517                         pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3518
3519                 hubp = pipe_ctx->plane_res.hubp;
3520                 if (!hubp)
3521                         continue;
3522
3523                 mpcc_inst = hubp->inst;
3524                 // MPCC inst is equal to pipe index in practice
3525                 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3526                         if ((dc->res_pool->opps[opp_inst] != NULL) &&
3527                                 (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
3528                                 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3529                                 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3530                                 break;
3531                         }
3532                 }
3533         }
3534 }
3535
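/*
 * Full programming path for committing planes: waits out pending double
 * buffered updates, takes the pipe/SubVP locks, applies any stream update,
 * programs the front ends, then unlocks and runs post-unlock programming.
 */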
3536 static void commit_planes_for_stream(struct dc *dc,
3537                 struct dc_surface_update *srf_updates,
3538                 int surface_count,
3539                 struct dc_stream_state *stream,
3540                 struct dc_stream_update *stream_update,
3541                 enum surface_update_type update_type,
3542                 struct dc_state *context)
3543 {
3544         int i, j;
3545         struct pipe_ctx *top_pipe_to_program = NULL;
3546         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3547         bool subvp_prev_use = false;
3548         bool subvp_curr_use = false;
3549         uint8_t current_stream_mask = 0;
3550
3551         // Once we apply the new subvp context to hardware it won't be in the
3552         // dc->current_state anymore, so cache whether SubVP was in use before
3553         // applying the new context
3554         subvp_prev_use = false;
3555         dc_exit_ips_for_hw_access(dc);
3556
3557         dc_z10_restore(dc);
3558         if (update_type == UPDATE_TYPE_FULL)
3559                 wait_for_outstanding_hw_updates(dc, context);
3560
3561         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3562                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3563
3564                 if (pipe->stream && pipe->plane_state) {
3565                         set_p_state_switch_method(dc, context, pipe);
3566
3567                         if (dc->debug.visual_confirm)
3568                                 dc_update_visual_confirm_color(dc, context, pipe);
3569                 }
3570         }
3571
3572         if (update_type == UPDATE_TYPE_FULL) {
3573                 dc_allow_idle_optimizations(dc, false);
3574
3575                 if (get_seamless_boot_stream_count(context) == 0)
3576                         dc->hwss.prepare_bandwidth(dc, context);
3577
3578                 if (dc->hwss.update_dsc_pg)
3579                         dc->hwss.update_dsc_pg(dc, context, false);
3580
3581                 context_clock_trace(dc, context);
3582         }
3583
3584         top_pipe_to_program = resource_get_otg_master_for_stream(
3585                                 &context->res_ctx,
3586                                 stream);
3587         ASSERT(top_pipe_to_program != NULL);
3588         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3589                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3590
3591                 // Check old context for SubVP
3592                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
3593                 if (subvp_prev_use)
3594                         break;
3595         }
3596
3597         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3598                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3599
3600                 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3601                         subvp_curr_use = true;
3602                         break;
3603                 }
3604         }
3605
3606         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3607                 struct pipe_ctx *mpcc_pipe;
3608                 struct pipe_ctx *odm_pipe;
3609
3610                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3611                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3612                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3613         }
3614
3615         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3616                 if (top_pipe_to_program &&
3617                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3618                         if (should_use_dmub_lock(stream->link)) {
3619                                 union dmub_hw_lock_flags hw_locks = { 0 };
3620                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3621
3622                                 hw_locks.bits.lock_dig = 1;
3623                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3624
3625                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3626                                                         true,
3627                                                         &hw_locks,
3628                                                         &inst_flags);
3629                         } else
3630                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3631                                                 top_pipe_to_program->stream_res.tg);
3632                 }
3633
3634         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3635                 if (dc->hwss.subvp_pipe_control_lock)
3636                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3637                 dc->hwss.interdependent_update_lock(dc, context, true);
3638
3639         } else {
3640                 if (dc->hwss.subvp_pipe_control_lock)
3641                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3642                 /* Lock the top pipe while updating plane addrs, since freesync requires
3643                  *  plane addr update event triggers to be synchronized.
3644                  *  top_pipe_to_program is expected to never be NULL
3645                  */
3646                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3647         }
3648
3649         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3650
3651         // Stream updates
3652         if (stream_update)
3653                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3654
3655         if (surface_count == 0) {
3656                 /*
3657                  * When turning off the screen there is no need to program the front
3658                  * end a second time; just return after programming blank.
3659                  */
3660                 if (dc->hwss.apply_ctx_for_surface)
3661                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3662                 if (dc->hwss.program_front_end_for_ctx)
3663                         dc->hwss.program_front_end_for_ctx(dc, context);
3664
3665                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3666                         dc->hwss.interdependent_update_lock(dc, context, false);
3667                 } else {
3668                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3669                 }
3670                 dc->hwss.post_unlock_program_front_end(dc, context);
3671
3672                 if (update_type != UPDATE_TYPE_FAST)
3673                         if (dc->hwss.commit_subvp_config)
3674                                 dc->hwss.commit_subvp_config(dc, context);
3675
3676                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3677                  * move the SubVP lock to after the phantom pipes have been setup
3678                  */
3679                 if (dc->hwss.subvp_pipe_control_lock)
3680                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3681                                                          NULL, subvp_prev_use);
3682                 return;
3683         }
3684
3685         if (update_type != UPDATE_TYPE_FAST) {
3686                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3687                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3688
3689                         if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3690                                 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3691                                 pipe_ctx->stream && pipe_ctx->plane_state) {
3692                                 /* Only update visual confirm for SubVP and MCLK switching here.
3693                                  * The bar appears on all pipes, so it must be updated on every
3694                                  * display to keep the information from going stale.
3695                                  */
3696                                 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3697                                                 pipe_ctx->plane_res.hubp->inst);
3698                         }
3699                 }
3700         }
3701
3702         for (i = 0; i < surface_count; i++) {
3703                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3704                 /* set logical flag for lock/unlock use */
3705                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3706                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3707                         if (!pipe_ctx->plane_state)
3708                                 continue;
3709                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3710                                 continue;
3711                         pipe_ctx->plane_state->triplebuffer_flips = false;
3712                         if (update_type == UPDATE_TYPE_FAST &&
3713                                 dc->hwss.program_triplebuffer != NULL &&
3714                                 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3715                                         /* triple buffer for VUpdate only */
3716                                         pipe_ctx->plane_state->triplebuffer_flips = true;
3717                         }
3718                 }
3719                 if (update_type == UPDATE_TYPE_FULL) {
3720                         /* force vsync flip when reconfiguring pipes to prevent underflow */
3721                         plane_state->flip_immediate = false;
3722                 }
3723         }
3724
3725         // Update Type FULL, Surface updates
3726         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3727                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3728
3729                 if (!pipe_ctx->top_pipe &&
3730                         !pipe_ctx->prev_odm_pipe &&
3731                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3732                         struct dc_stream_status *stream_status = NULL;
3733
3734                         if (!pipe_ctx->plane_state)
3735                                 continue;
3736
3737                         /* Full front end update; skip the rest for fast updates */
3738                         if (update_type == UPDATE_TYPE_FAST)
3739                                 continue;
3740
3741                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3742
3743                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3744                                 /*turn off triple buffer for full update*/
3745                                 dc->hwss.program_triplebuffer(
3746                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3747                         }
3748                         stream_status =
3749                                 stream_get_status(context, pipe_ctx->stream);
3750
3751                         if (dc->hwss.apply_ctx_for_surface)
3752                                 dc->hwss.apply_ctx_for_surface(
3753                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3754                 }
3755         }
3756         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3757                 dc->hwss.program_front_end_for_ctx(dc, context);
3758                 if (dc->debug.validate_dml_output) {
3759                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3760                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3761                                 if (cur_pipe->stream == NULL)
3762                                         continue;
3763
3764                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3765                                                 cur_pipe->plane_res.hubp, dc->ctx,
3766                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3767                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3768                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3769                         }
3770                 }
3771         }
3772
3773         // Update Type FAST, Surface updates
3774         if (update_type == UPDATE_TYPE_FAST) {
3775                 if (dc->hwss.set_flip_control_gsl)
3776                         for (i = 0; i < surface_count; i++) {
3777                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3778
3779                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3780                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3781
3782                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3783                                                 continue;
3784
3785                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3786                                                 continue;
3787
3788                                         // GSL has to be used for flip immediate
3789                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3790                                                         pipe_ctx->plane_state->flip_immediate);
3791                                 }
3792                         }
3793
3794                 /* Perform requested updates */
3795                 for (i = 0; i < surface_count; i++) {
3796                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3797
3798                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3799                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3800
3801                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3802                                         continue;
3803
3804                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3805                                         continue;
3806
3807                                 /* program triple buffering after lock, based on flip type */
3808                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3809                                         /* only enable triple buffering for fast updates */
3810                                         dc->hwss.program_triplebuffer(
3811                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3812                                 }
3813                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3814                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3815                         }
3816                 }
3817         }
3818
3819         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3820                 dc->hwss.interdependent_update_lock(dc, context, false);
3821         } else {
3822                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3823         }
3824
3825         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3826                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3827                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3828                                 top_pipe_to_program->stream_res.tg,
3829                                 CRTC_STATE_VACTIVE);
3830                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3831                                 top_pipe_to_program->stream_res.tg,
3832                                 CRTC_STATE_VBLANK);
3833                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3834                                 top_pipe_to_program->stream_res.tg,
3835                                 CRTC_STATE_VACTIVE);
3836
3837                         if (should_use_dmub_lock(stream->link)) {
3838                                 union dmub_hw_lock_flags hw_locks = { 0 };
3839                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3840
3841                                 hw_locks.bits.lock_dig = 1;
3842                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3843
3844                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3845                                                         false,
3846                                                         &hw_locks,
3847                                                         &inst_flags);
3848                         } else
3849                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3850                                         top_pipe_to_program->stream_res.tg);
3851                 }
3852
3853         if (subvp_curr_use) {
3854                 /* If enabling subvp or transitioning from subvp->subvp, enable the
3855                  * phantom streams before we program front end for the phantom pipes.
3856                  */
3857                 if (update_type != UPDATE_TYPE_FAST) {
3858                         if (dc->hwss.enable_phantom_streams)
3859                                 dc->hwss.enable_phantom_streams(dc, context);
3860                 }
3861         }
3862
3863         if (update_type != UPDATE_TYPE_FAST)
3864                 dc->hwss.post_unlock_program_front_end(dc, context);
3865
3866         if (subvp_prev_use && !subvp_curr_use) {
3867                 /* If disabling subvp, disable phantom streams after front end
3868                  * programming has completed (we turn on phantom OTG in order
3869                  * to complete the plane disable for phantom pipes).
3870                  */
3871
3872                 if (dc->hwss.disable_phantom_streams)
3873                         dc->hwss.disable_phantom_streams(dc, context);
3874         }
3875
3876         if (update_type != UPDATE_TYPE_FAST)
3877                 if (dc->hwss.commit_subvp_config)
3878                         dc->hwss.commit_subvp_config(dc, context);
3879         /* Since phantom pipe programming was moved to post_unlock_program_front_end,
3880          * move the SubVP lock to after the phantom pipes have been set up.
3881          */
3882         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3883                 if (dc->hwss.subvp_pipe_control_lock)
3884                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3885         } else {
3886                 if (dc->hwss.subvp_pipe_control_lock)
3887                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3888         }
3889
3890         // Fire manual trigger only when bottom plane is flipped
3891         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3892                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3893
3894                 if (!pipe_ctx->plane_state)
3895                         continue;
3896
3897                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3898                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3899                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3900                                 pipe_ctx->plane_state->skip_manual_trigger)
3901                         continue;
3902
3903                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3904                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3905         }
3906
3907         current_stream_mask = get_stream_mask(dc, context);
3908         if (current_stream_mask != context->stream_mask) {
3909                 context->stream_mask = current_stream_mask;
3910                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
3911         }
3912 }
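
/* In summary, the tail of commit_planes_for_stream() above runs in a fixed
 * order: front-end and surface programming under the pipe/interdependent
 * locks, lock release, phantom (SubVP) streams enabled before and disabled
 * after post_unlock_program_front_end(), SubVP config commit and SubVP lock
 * release, an optional manual trigger for flipped bottom planes, and finally
 * a DMUB notification if the stream mask changed.
 */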
3913
3914 /**
3915  * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3916  *
3917  * @dc: Used to get the current state status
3918  * @stream: Target stream whose attached planes are being updated
3919  * @srf_updates: Array of surface updates
3920  * @surface_count: Number of surface updates
3921  * @is_plane_addition: [out] Set to true if this is a plane addition case
3922  *
3923  * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3924  * MPO if both are used simultaneously in some specific configurations (e.g.,
3925  * 4k@144). This function checks if the incoming context requires applying a
3926  * transition state, with unnecessary pipe splitting and ODM disabled, to
3927  * work around this hardware limitation and prevent the edge case. If the OPP
3928  * associated with an MPCC might change due to plane additions, this function
3929  * returns true.
3930  *
3931  * Return:
3932  * True if the OPP and MPCC might change; false otherwise.
3933  */
3934 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3935                 struct dc_stream_state *stream,
3936                 struct dc_surface_update *srf_updates,
3937                 int surface_count,
3938                 bool *is_plane_addition)
3939 {
3941         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3942         bool force_minimal_pipe_splitting = false;
3943         bool subvp_active = false;
3944         uint32_t i;
3945
3946         *is_plane_addition = false;
3947
3948         if (cur_stream_status &&
3949                         dc->current_state->stream_count > 0 &&
3950                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3951                 /* determine if minimal transition is required due to MPC */
3952                 if (surface_count > 0) {
3953                         if (cur_stream_status->plane_count > surface_count) {
3954                                 force_minimal_pipe_splitting = true;
3955                         } else if (cur_stream_status->plane_count < surface_count) {
3956                                 force_minimal_pipe_splitting = true;
3957                                 *is_plane_addition = true;
3958                         }
3959                 }
3960         }
3961
3962         if (cur_stream_status &&
3963                         dc->current_state->stream_count == 1 &&
3964                         dc->debug.enable_single_display_2to1_odm_policy) {
3965                 /* determine if minimal transition is required due to dynamic ODM */
3966                 if (surface_count > 0) {
3967                         if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3968                                 force_minimal_pipe_splitting = true;
3969                         } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3970                                 force_minimal_pipe_splitting = true;
3971                                 *is_plane_addition = true;
3972                         }
3973                 }
3974         }
3975
3976         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3977                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3978
3979                 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
3980                         subvp_active = true;
3981                         break;
3982                 }
3983         }
3984
3985         /* For SubVP when adding or removing planes we need to add a minimal transition
3986          * (even when disabling all planes). Whenever disabling a phantom pipe, we
3987          * must use the minimal transition path to disable the pipe correctly.
3988          *
3989          * We want to use the minimal transition whenever SubVP is active, not only if
3990          * a plane is being added to / removed from a SubVP stream (an MPO plane can be
3991          * added to the DRR pipe of a SubVP + DRR config, in which case we still want to
3992          * run through a minimal transition to disable SubVP).
3993          */
3994         if (cur_stream_status && subvp_active) {
3995                 /* determine if minimal transition is required due to SubVP */
3996                 if (cur_stream_status->plane_count > surface_count) {
3997                         force_minimal_pipe_splitting = true;
3998                 } else if (cur_stream_status->plane_count < surface_count) {
3999                         force_minimal_pipe_splitting = true;
4000                         *is_plane_addition = true;
4001                 }
4002         }
4003
4004         return force_minimal_pipe_splitting;
4005 }
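
/* A worked example of the dynamic ODM check above (hypothetical numbers,
 * assuming a single 4k@144 stream with the 2to1 ODM policy enabled): if the
 * current state has 3 planes and the update carries surface_count == 1, then
 * plane_count > 2 and plane_count > surface_count, so a minimal transition is
 * forced with *is_plane_addition left false; going from 1 plane to 3 surfaces
 * trips the other branch instead and also sets *is_plane_addition to true.
 */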
4006
4007 struct pipe_split_policy_backup {
4008         bool dynamic_odm_policy;
4009         bool subvp_policy;
4010         enum pipe_split_policy mpc_policy;
4011 };
4012
4013 static void release_minimal_transition_state(struct dc *dc,
4014                 struct dc_state *context, struct pipe_split_policy_backup *policy)
4015 {
4016         dc_state_release(context);
4017         /* restore previous pipe split and odm policy */
4018         if (!dc->config.is_vmin_only_asic)
4019                 dc->debug.pipe_split_policy = policy->mpc_policy;
4020         dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
4021         dc->debug.force_disable_subvp = policy->subvp_policy;
4022 }
4023
4024 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4025                 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4026 {
4027         struct dc_state *minimal_transition_context = NULL;
4028         unsigned int i, j;
4029
4030         if (!dc->config.is_vmin_only_asic) {
4031                 policy->mpc_policy = dc->debug.pipe_split_policy;
4032                 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4033         }
4034         policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4035         dc->debug.enable_single_display_2to1_odm_policy = false;
4036         policy->subvp_policy = dc->debug.force_disable_subvp;
4037         dc->debug.force_disable_subvp = true;
4038
4039         minimal_transition_context = dc_state_create_copy(base_context);
4040         if (!minimal_transition_context)
4041                 return NULL;
4042
4043         /* commit minimal state */
4044         if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4045                 for (i = 0; i < minimal_transition_context->stream_count; i++) {
4046                         struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
4047
4048                         for (j = 0; j < stream_status->plane_count; j++) {
4049                                 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4050
4051                                 /* force vsync flip when reconfiguring pipes to prevent underflow
4052                                  * and corruption
4053                                  */
4054                                 plane_state->flip_immediate = false;
4055                         }
4056                 }
4057         } else {
4058                 /* this should never happen */
4059                 release_minimal_transition_state(dc, minimal_transition_context, policy);
4060                 BREAK_TO_DEBUGGER();
4061                 minimal_transition_context = NULL;
4062         }
4063         return minimal_transition_context;
4064 }
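
/* A minimal usage sketch for the create/release pair above, mirroring what
 * commit_minimal_transition_state() below actually does (error handling and
 * the seamlessness checks are elided):
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *mt_ctx;
 *
 *	mt_ctx = create_minimal_transition_state(dc, context, &policy);
 *	if (mt_ctx) {
 *		// ... commit or inspect mt_ctx here ...
 *		release_minimal_transition_state(dc, mt_ctx, &policy);
 *	}
 */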
4065
4066
4067 /**
4068  * commit_minimal_transition_state - Commit a minimal state based on current or new context
4069  *
4070  * @dc: DC structure, used to get the current state
4071  * @context: New context
4072  * @stream: Stream getting the update for the flip
4073  *
4074  * The function takes in the current and new states and determines a minimal
4075  * transition state as the intermediate step that could make the transition
4076  * between the two seamless. If one is found, it commits the minimal transition
4077  * state, updates the current state to it, and returns true; if not, it returns false.
4078  *
4079  * Return:
4080  * True if the minimal transition succeeded; false otherwise.
4081  */
4082 static bool commit_minimal_transition_state(struct dc *dc,
4083                 struct dc_state *context,
4084                 struct dc_stream_state *stream)
4085 {
4086         bool success = false;
4087         struct dc_state *minimal_transition_context;
4088         struct pipe_split_policy_backup policy;
4089
4090         /* commit based on new context */
4091         minimal_transition_context = create_minimal_transition_state(dc,
4092                         context, &policy);
4093         if (minimal_transition_context) {
4094                 if (dc->hwss.is_pipe_topology_transition_seamless(
4095                                         dc, dc->current_state, minimal_transition_context) &&
4096                         dc->hwss.is_pipe_topology_transition_seamless(
4097                                         dc, minimal_transition_context, context)) {
4098                         DC_LOG_DC("%s base = new state\n", __func__);
4099
4100                         success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4101                 }
4102                 release_minimal_transition_state(dc, minimal_transition_context, &policy);
4103         }
4104
4105         if (!success) {
4106                 /* commit based on current context */
4107                 restore_planes_and_stream_state(&dc->current_state->scratch, stream);
4108                 minimal_transition_context = create_minimal_transition_state(dc,
4109                                 dc->current_state, &policy);
4110                 if (minimal_transition_context) {
4111                         if (dc->hwss.is_pipe_topology_transition_seamless(
4112                                         dc, dc->current_state, minimal_transition_context) &&
4113                                 dc->hwss.is_pipe_topology_transition_seamless(
4114                                                 dc, minimal_transition_context, context)) {
4115                                 DC_LOG_DC("%s base = current state\n", __func__);
4116                                 success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
4117                         }
4118                         release_minimal_transition_state(dc, minimal_transition_context, &policy);
4119                 }
4120                 restore_planes_and_stream_state(&context->scratch, stream);
4121         }
4122
4123         ASSERT(success);
4124         return success;
4125 }
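
/* Note on the fallback above: the first attempt bases the minimal transition
 * on the new context; only if that path is not seamless does the code restore
 * the scratch plane/stream state and retry with the current state as the
 * base, before asserting on overall success.
 */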
4126
4127 /**
4128  * commit_minimal_transition_state_legacy - Create a transition pipe split state
4129  *
4130  * @dc: Used to get the current state status
4131  * @transition_base_context: New transition state
4132  *
4133  * In some specific configurations, such as pipe split on multi-display with
4134  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4135  * programming when moving to new planes. To mitigate those types of problems,
4136  * this function adds a transition state that minimizes pipe usage before
4137  * programming the new configuration. When adding a new plane, the current
4138  * state requires the least pipes, so it is applied without splitting. When
4139  * removing a plane, the new state requires the least pipes, so it is applied
4140  * without splitting.
4141  * state requires the fewest pipes, so it is applied without splitting. When
4142  * removing a plane, the new state requires the fewest pipes, so it is applied
4143  * Return false if something is wrong in the transition state.
4144  */
4145 static bool commit_minimal_transition_state_legacy(struct dc *dc,
4146                 struct dc_state *transition_base_context)
4147 {
4148         struct dc_state *transition_context;
4149         struct pipe_split_policy_backup policy;
4150         enum dc_status ret = DC_ERROR_UNEXPECTED;
4151         unsigned int i, j;
4152         unsigned int pipe_in_use = 0;
4153         bool subvp_in_use = false;
4154         bool odm_in_use = false;
4155
4156         /* check current pipes in use */
4157         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4158                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4159
4160                 if (pipe->plane_state)
4161                         pipe_in_use++;
4162         }
4163
4164         /* If SubVP is enabled and we are adding or removing planes from any main subvp
4165          * pipe, we must use the minimal transition.
4166          */
4167         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4168                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4169
4170                 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4171                         subvp_in_use = true;
4172                         break;
4173                 }
4174         }
4175
4176         /* If ODM is enabled and we are adding or removing planes from any ODM
4177          * pipe, we must use the minimal transition.
4178          */
4179         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4180                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4181
4182                 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4183                         odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4184                         break;
4185                 }
4186         }
4187
4188         /* When the OS adds a new surface while all pipes are already in use by the ODM
4189          * combine and MPC split features, we must use commit_minimal_transition_state to
4190          * transition safely. After the OS exits MPO, it goes back to using ODM and MPC
4191          * split across all pipes, so we must call it again. Otherwise, return true to skip.
4192          *
4193          * This reduces the scenarios that use dc_commit_state_no_check at flip time,
4194          * especially when entering/exiting MPO while DCN still has enough resources.
4195          */
4196         if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4197                 return true;
4198
4199         DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4200                         dc->current_state == transition_base_context ? "current" : "new",
4201                         subvp_in_use ? "Subvp In Use" :
4202                         odm_in_use ? "ODM in Use" :
4203                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4204                         "Unknown");
4205
4206         transition_context = create_minimal_transition_state(dc,
4207                         transition_base_context, &policy);
4208         if (transition_context) {
4209                 ret = dc_commit_state_no_check(dc, transition_context);
4210                 release_minimal_transition_state(dc, transition_context, &policy);
4211         }
4212
4213         if (ret != DC_OK) {
4214                 /* this should never happen */
4215                 BREAK_TO_DEBUGGER();
4216                 return false;
4217         }
4218
4219         /* force full surface update */
4220         for (i = 0; i < dc->current_state->stream_count; i++) {
4221                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4222                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4223                 }
4224         }
4225
4226         return true;
4227 }
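
/* Concretely, per the early-out above: on an ASIC whose resource pool has 8
 * pipes (hypothetical numbers), with only 4 pipes in use and neither SubVP
 * phantom pipes nor multi-slice ODM active, this function returns true
 * immediately and commits no transition state.
 */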
4228
4229 /**
4230  * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4231  *
4232  * @dc: Current DC state
4233  * @context: New DC state to be programmed
4234  * @surface_count: Number of surfaces that have an update
4235  * @stream: Corresponding stream to be updated in the current flip
4236  *
4237  * Updating the seamless boot flags does not need to be part of the commit sequence. This
4238  * helper function will update the seamless boot flags on each flip (if required)
4239  * outside of the HW commit sequence (fast or slow).
4240  *
4241  * Return: void
4242  */
4243 static void update_seamless_boot_flags(struct dc *dc,
4244                 struct dc_state *context,
4245                 int surface_count,
4246                 struct dc_stream_state *stream)
4247 {
4248         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4249                 /* The seamless boot optimization flag keeps clocks and watermarks high
4250                  * until the first flip. After the first flip, optimization is required
4251                  * to lower bandwidth. Note that UEFI is expected to light up only a
4252                  * single display on POST, so we expect only one stream with the
4253                  * seamless boot flag set.
4254                  */
4255                 if (stream->apply_seamless_boot_optimization) {
4256                         stream->apply_seamless_boot_optimization = false;
4257
4258                         if (get_seamless_boot_stream_count(context) == 0)
4259                                 dc->optimized_required = true;
4260                 }
4261         }
4262 }
4263
4264 static void populate_fast_updates(struct dc_fast_update *fast_update,
4265                 struct dc_surface_update *srf_updates,
4266                 int surface_count,
4267                 struct dc_stream_update *stream_update)
4268 {
4269         int i = 0;
4270
4271         if (stream_update) {
4272                 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4273                 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4274         }
4275
4276         for (i = 0; i < surface_count; i++) {
4277                 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4278                 fast_update[i].gamma = srf_updates[i].gamma;
4279                 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4280                 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4281                 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4282         }
4283 }
4284
4285 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4286 {
4287         int i;
4288
4289         if (fast_update[0].out_transfer_func ||
4290                 fast_update[0].output_csc_transform)
4291                 return true;
4292
4293         for (i = 0; i < surface_count; i++) {
4294                 if (fast_update[i].flip_addr ||
4295                                 fast_update[i].gamma ||
4296                                 fast_update[i].gamut_remap_matrix ||
4297                                 fast_update[i].input_csc_color_matrix ||
4298                                 fast_update[i].coeff_reduction_factor)
4299                         return true;
4300         }
4301
4302         return false;
4303 }
4304
4305 static bool full_update_required(struct dc *dc,
4306                 struct dc_surface_update *srf_updates,
4307                 int surface_count,
4308                 struct dc_stream_update *stream_update,
4309                 struct dc_stream_state *stream)
4310 {
4312         int i;
4313         struct dc_stream_status *stream_status;
4314         const struct dc_state *context = dc->current_state;
4315
4316         for (i = 0; i < surface_count; i++) {
4317                 if (srf_updates &&
4318                                 (srf_updates[i].plane_info ||
4319                                 srf_updates[i].scaling_info ||
4320                                 (srf_updates[i].hdr_mult.value &&
4321                                 srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
4322                                 srf_updates[i].in_transfer_func ||
4323                                 srf_updates[i].func_shaper ||
4324                                 srf_updates[i].lut3d_func ||
4325                                 srf_updates[i].surface->force_full_update ||
4326                                 (srf_updates[i].flip_addr &&
4327                                 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4328                                 !is_surface_in_context(context, srf_updates[i].surface)))
4329                         return true;
4330         }
4331
4332         if (stream_update &&
4333                         (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4334                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4335                         stream_update->integer_scaling_update) ||
4336                         stream_update->hdr_static_metadata ||
4337                         stream_update->abm_level ||
4338                         stream_update->periodic_interrupt ||
4339                         stream_update->vrr_infopacket ||
4340                         stream_update->vsc_infopacket ||
4341                         stream_update->vsp_infopacket ||
4342                         stream_update->hfvsif_infopacket ||
4343                         stream_update->vtem_infopacket ||
4344                         stream_update->adaptive_sync_infopacket ||
4345                         stream_update->dpms_off ||
4346                         stream_update->allow_freesync ||
4347                         stream_update->vrr_active_variable ||
4348                         stream_update->vrr_active_fixed ||
4349                         stream_update->gamut_remap ||
4350                         stream_update->output_color_space ||
4351                         stream_update->dither_option ||
4352                         stream_update->wb_update ||
4353                         stream_update->dsc_config ||
4354                         stream_update->mst_bw_update ||
4355                         stream_update->func_shaper ||
4356                         stream_update->lut3d_func ||
4357                         stream_update->pending_test_pattern ||
4358                         stream_update->crtc_timing_adjust))
4359                 return true;
4360
4361         if (stream) {
4362                 stream_status = dc_stream_get_status(stream);
4363                 if (stream_status == NULL || stream_status->plane_count != surface_count)
4364                         return true;
4365         }
4366         if (dc->idle_optimizations_allowed)
4367                 return true;
4368
4369         return false;
4370 }
4371
4372 static bool fast_update_only(struct dc *dc,
4373                 struct dc_fast_update *fast_update,
4374                 struct dc_surface_update *srf_updates,
4375                 int surface_count,
4376                 struct dc_stream_update *stream_update,
4377                 struct dc_stream_state *stream)
4378 {
4379         return fast_updates_exist(fast_update, surface_count)
4380                         && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4381 }
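
/* A sketch of how the three helpers above classify a flip-only update,
 * assuming the lone surface update sets only flip_addr, the surface is
 * already in the current context with a matching plane count, TMZ is
 * unchanged, and idle optimizations are off:
 *
 *	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 *
 *	populate_fast_updates(fast_update, srf_updates, 1, NULL);
 *	// fast_update[0].flip_addr is set  -> fast_updates_exist() == true
 *	// no plane_info/scaling/etc. given -> full_update_required() == false
 *	// hence fast_update_only() returns true and callers may take the
 *	// commit_planes_for_stream_fast() path
 */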
4382
4383 bool dc_update_planes_and_stream(struct dc *dc,
4384                 struct dc_surface_update *srf_updates, int surface_count,
4385                 struct dc_stream_state *stream,
4386                 struct dc_stream_update *stream_update)
4387 {
4388         struct dc_state *context;
4389         enum surface_update_type update_type;
4390         int i;
4391         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4392
4393         /* In cases where MPO and split or ODM are used, transitions can
4394          * cause underflow. Apply the stream configuration with minimal pipe
4395          * split first to avoid unsupported transitions for active pipes.
4396          */
4397         bool force_minimal_pipe_splitting = false;
4398         bool is_plane_addition = false;
4399         bool is_fast_update_only;
4400
4401         dc_exit_ips_for_hw_access(dc);
4402
4403         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4404         is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4405                         surface_count, stream_update, stream);
4406         force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4407                         dc,
4408                         stream,
4409                         srf_updates,
4410                         surface_count,
4411                         &is_plane_addition);
4412
4413         /* on plane addition, minimal state is the current one */
4414         if (force_minimal_pipe_splitting && is_plane_addition &&
4415                 !commit_minimal_transition_state_legacy(dc, dc->current_state))
4416                 return false;
4417
4418         if (!update_planes_and_stream_state(
4419                         dc,
4420                         srf_updates,
4421                         surface_count,
4422                         stream,
4423                         stream_update,
4424                         &update_type,
4425                         &context))
4426                 return false;
4427
4428         /* on plane removal, minimal state is the new one */
4429         if (force_minimal_pipe_splitting && !is_plane_addition) {
4430                 if (!commit_minimal_transition_state_legacy(dc, context)) {
4431                         dc_state_release(context);
4432                         return false;
4433                 }
4434                 update_type = UPDATE_TYPE_FULL;
4435         }
4436
4437         if (dc->hwss.is_pipe_topology_transition_seamless &&
4438                         !dc->hwss.is_pipe_topology_transition_seamless(
4439                                         dc, dc->current_state, context)) {
4440                 commit_minimal_transition_state(dc,
4441                                 context, stream);
4442         }
4443         update_seamless_boot_flags(dc, context, surface_count, stream);
4444         if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4445                 commit_planes_for_stream_fast(dc,
4446                                 srf_updates,
4447                                 surface_count,
4448                                 stream,
4449                                 stream_update,
4450                                 update_type,
4451                                 context);
4452         } else {
4453                 if (!stream_update &&
4454                                 dc->hwss.is_pipe_topology_transition_seamless &&
4455                                 !dc->hwss.is_pipe_topology_transition_seamless(
4456                                                 dc, dc->current_state, context)) {
4457                         DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4458                         BREAK_TO_DEBUGGER();
4459                 }
4460                 commit_planes_for_stream(
4461                                 dc,
4462                                 srf_updates,
4463                                 surface_count,
4464                                 stream,
4465                                 stream_update,
4466                                 update_type,
4467                                 context);
4468         }
4469
4470         if (dc->current_state != context) {
4471
4472                 /* Since memory free requires elevated IRQL, an interrupt
4473                  * request is generated by mem free. If this happens
4474                  * between freeing and reassigning the context, our vsync
4475                  * interrupt will call into dc and cause a memory
4476                  * corruption BSOD. Hence, we first reassign the context,
4477                  * then free the old context.
4478                  */
4479
4480                 struct dc_state *old = dc->current_state;
4481
4482                 dc->current_state = context;
4483                 dc_state_release(old);
4484
4485                 // clear any forced full updates
4486                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4487                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4488
4489                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4490                                 pipe_ctx->plane_state->force_full_update = false;
4491                 }
4492         }
4493         return true;
4494 }
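
/* An illustrative page flip through dc_update_planes_and_stream() above,
 * roughly as a DM would issue it (plane_state, stream and new_address are
 * the caller's; error handling elided):
 *
 *	struct dc_surface_update updates[1] = {0};
 *	struct dc_flip_addrs flip_addr = {0};
 *
 *	flip_addr.address = new_address;
 *	flip_addr.flip_immediate = false;	// vsync-aligned flip
 *	updates[0].surface = plane_state;
 *	updates[0].flip_addr = &flip_addr;
 *
 *	if (!dc_update_planes_and_stream(dc, updates, 1, stream, NULL))
 *		return;				// commit failed
 */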
4495
4496 void dc_commit_updates_for_stream(struct dc *dc,
4497                 struct dc_surface_update *srf_updates,
4498                 int surface_count,
4499                 struct dc_stream_state *stream,
4500                 struct dc_stream_update *stream_update,
4501                 struct dc_state *state)
4502 {
4503         const struct dc_stream_status *stream_status;
4504         enum surface_update_type update_type;
4505         struct dc_state *context;
4506         struct dc_context *dc_ctx = dc->ctx;
4507         int i, j;
4508         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4509
4510         dc_exit_ips_for_hw_access(dc);
4511
4512         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4513         stream_status = dc_stream_get_status(stream);
4514         context = dc->current_state;
4515
4516         update_type = dc_check_update_surfaces_for_stream(
4517                                 dc, srf_updates, surface_count, stream_update, stream_status);
4518
4519         /* TODO: Since changing the commit sequence can have a huge impact,
4520          * we decided to only enable it for DCN3x. However, as soon as we
4521          * are more confident about this change, we'll need to enable the
4522          * new sequence for all ASICs.
4523          */
4524         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4525                 /*
4526                  * Previous frame finished and HW is ready for optimization.
4527                  */
4528                 if (update_type == UPDATE_TYPE_FAST)
4529                         dc_post_update_surfaces_to_stream(dc);
4530
4531                 dc_update_planes_and_stream(dc, srf_updates,
4532                                             surface_count, stream,
4533                                             stream_update);
4534                 return;
4535         }
4536
4537         if (update_type >= update_surface_trace_level)
4538                 update_surface_trace(dc, srf_updates, surface_count);
4539
4541         if (update_type >= UPDATE_TYPE_FULL) {
4542
4543                 /* initialize scratch memory for building context */
4544                 context = dc_state_create_copy(state);
4545                 if (context == NULL) {
4546                         DC_ERROR("Failed to allocate new validate context!\n");
4547                         return;
4548                 }
4549
4550                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4551                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4552                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4553
4554                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4555                                 new_pipe->plane_state->force_full_update = true;
4556                 }
4557         } else if (update_type == UPDATE_TYPE_FAST) {
4558                 /*
4559                  * Previous frame finished and HW is ready for optimization.
4560                  */
4561                 dc_post_update_surfaces_to_stream(dc);
4562         }
4563
4565         for (i = 0; i < surface_count; i++) {
4566                 struct dc_plane_state *surface = srf_updates[i].surface;
4567
4568                 copy_surface_update_to_plane(surface, &srf_updates[i]);
4569
4570                 if (update_type >= UPDATE_TYPE_MED) {
4571                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4572                                 struct pipe_ctx *pipe_ctx =
4573                                         &context->res_ctx.pipe_ctx[j];
4574
4575                                 if (pipe_ctx->plane_state != surface)
4576                                         continue;
4577
4578                                 resource_build_scaling_params(pipe_ctx);
4579                         }
4580                 }
4581         }
4582
4583         copy_stream_update_to_stream(dc, context, stream, stream_update);
4584
4585         if (update_type >= UPDATE_TYPE_FULL) {
4586                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4587                         DC_ERROR("Mode validation failed for stream update!\n");
4588                         dc_state_release(context);
4589                         return;
4590                 }
4591         }
4592
4593         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4594
4595         update_seamless_boot_flags(dc, context, surface_count, stream);
4596         if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4597                         !dc->debug.enable_legacy_fast_update) {
4598                 commit_planes_for_stream_fast(dc,
4599                                 srf_updates,
4600                                 surface_count,
4601                                 stream,
4602                                 stream_update,
4603                                 update_type,
4604                                 context);
4605         } else {
4606                 commit_planes_for_stream(
4607                                 dc,
4608                                 srf_updates,
4609                                 surface_count,
4610                                 stream,
4611                                 stream_update,
4612                                 update_type,
4613                                 context);
4614         }
4615         /* update current_state */
4616         if (dc->current_state != context) {
4617
4618                 struct dc_state *old = dc->current_state;
4619
4620                 dc->current_state = context;
4621                 dc_state_release(old);
4622
4623                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4624                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4625
4626                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4627                                 pipe_ctx->plane_state->force_full_update = false;
4628                 }
4629         }
4630
4631         /* Legacy optimization path for DCE. */
4632         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4633                 dc_post_update_surfaces_to_stream(dc);
4634                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4635         }
4636
4639 }
4640
4641 uint8_t dc_get_current_stream_count(struct dc *dc)
4642 {
4643         return dc->current_state->stream_count;
4644 }
4645
4646 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4647 {
4648         if (i < dc->current_state->stream_count)
4649                 return dc->current_state->streams[i];
4650         return NULL;
4651 }
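
/* The two accessors above combine into a simple enumeration pattern, e.g.:
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < dc_get_current_stream_count(dc); i++) {
 *		struct dc_stream_state *s = dc_get_stream_at_index(dc, i);
 *		// s is non-NULL for every i below the current count
 *	}
 */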
4652
4653 enum dc_irq_source dc_interrupt_to_irq_source(
4654                 struct dc *dc,
4655                 uint32_t src_id,
4656                 uint32_t ext_id)
4657 {
4658         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4659 }
4660
4661 /*
4662  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4663  */
4664 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4665 {
4667         if (dc == NULL)
4668                 return false;
4669
4670         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4671 }
4672
4673 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4674 {
4675         dal_irq_service_ack(dc->res_pool->irqs, src);
4676 }
4677
4678 void dc_power_down_on_boot(struct dc *dc)
4679 {
4680         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4681                         dc->hwss.power_down_on_boot)
4682                 dc->hwss.power_down_on_boot(dc);
4683 }
4684
4685 void dc_set_power_state(
4686         struct dc *dc,
4687         enum dc_acpi_cm_power_state power_state)
4688 {
4689         if (!dc->current_state)
4690                 return;
4691
4692         switch (power_state) {
4693         case DC_ACPI_CM_POWER_STATE_D0:
4694                 dc_state_construct(dc, dc->current_state);
4695
4696                 dc_exit_ips_for_hw_access(dc);
4697
4698                 dc_z10_restore(dc);
4699
4700                 dc->hwss.init_hw(dc);
4701
4702                 if (dc->hwss.init_sys_ctx != NULL &&
4703                         dc->vm_pa_config.valid) {
4704                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4705                 }
4706
4707                 break;
4708         default:
4709                 ASSERT(dc->current_state->stream_count == 0);
4710
4711                 dc_state_destruct(dc->current_state);
4712
4713                 break;
4714         }
4715 }
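
/* One plausible usage of the switch above, mirroring typical DM
 * suspend/resume handling:
 *
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);	// on resume
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);	// on suspend
 */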
4716
4717 void dc_resume(struct dc *dc)
4718 {
4719         uint32_t i;
4720
4721         for (i = 0; i < dc->link_count; i++)
4722                 dc->link_srv->resume(dc->links[i]);
4723 }
4724
4725 bool dc_is_dmcu_initialized(struct dc *dc)
4726 {
4727         struct dmcu *dmcu = dc->res_pool->dmcu;
4728
4729         if (dmcu)
4730                 return dmcu->funcs->is_dmcu_initialized(dmcu);
4731         return false;
4732 }
4733
4734 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4735 {
4736         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4737         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4738         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4739         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4740         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4741         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4742         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4743         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4744         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4745 }
4746 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4747 {
4748         if (dc->hwss.set_clock)
4749                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4750         return DC_ERROR_UNEXPECTED;
4751 }
4752 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4753 {
4754         if (dc->hwss.get_clock)
4755                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4756 }
4757
4758 /* enable/disable eDP PSR without specifying a stream for eDP */
4759 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4760 {
4761         int i;
4762         bool allow_active;
4763
4764         for (i = 0; i < dc->current_state->stream_count ; i++) {
4765                 struct dc_link *link;
4766                 struct dc_stream_state *stream = dc->current_state->streams[i];
4767
4768                 link = stream->link;
4769                 if (!link)
4770                         continue;
4771
4772                 if (link->psr_settings.psr_feature_enabled) {
4773                         if (enable && !link->psr_settings.psr_allow_active) {
4774                                 allow_active = true;
4775                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4776                                         return false;
4777                         } else if (!enable && link->psr_settings.psr_allow_active) {
4778                                 allow_active = false;
4779                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4780                                         return false;
4781                         }
4782                 }
4783         }
4784
4785         return true;
4786 }
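
/* One plausible use of the helper above: a DM can park PSR across all eDP
 * links around work that needs the pipe continuously active, e.g.:
 *
 *	if (!dc_set_psr_allow_active(dc, false))
 *		return;				// failed to exit PSR
 *	// ... timing-sensitive work ...
 *	dc_set_psr_allow_active(dc, true);	// re-allow PSR
 */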
4787
4788 /* enable/disable eDP Replay without specifying a stream for eDP */
4789 bool dc_set_replay_allow_active(struct dc *dc, bool active)
4790 {
4791         int i;
4792         bool allow_active;
4793
4794         for (i = 0; i < dc->current_state->stream_count; i++) {
4795                 struct dc_link *link;
4796                 struct dc_stream_state *stream = dc->current_state->streams[i];
4797
4798                 link = stream->link;
4799                 if (!link)
4800                         continue;
4801
4802                 if (link->replay_settings.replay_feature_enabled) {
4803                         if (active && !link->replay_settings.replay_allow_active) {
4804                                 allow_active = true;
4805                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4806                                         false, false, NULL))
4807                                         return false;
4808                         } else if (!active && link->replay_settings.replay_allow_active) {
4809                                 allow_active = false;
4810                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
4811                                         true, false, NULL))
4812                                         return false;
4813                         }
4814                 }
4815         }
4816
4817         return true;
4818 }
4819
4820 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4821 {
4822         if (dc->debug.disable_idle_power_optimizations)
4823                 return;
4824
4825         if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4826                 return;
4827
4828         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4829                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4830                         return;
4831
4832         if (allow == dc->idle_optimizations_allowed)
4833                 return;
4834
4835         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4836                 dc->idle_optimizations_allowed = allow;
4837 }
4838
4839 void dc_exit_ips_for_hw_access(struct dc *dc)
4840 {
4841         if (dc->caps.ips_support)
4842                 dc_allow_idle_optimizations(dc, false);
4843 }
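
/* Note that dc_exit_ips_for_hw_access() above is one-way by design: it only
 * ever clears idle optimizations so that register access is safe, and it is
 * called at the top of the public entry points in this file (see
 * dc_update_planes_and_stream()); re-allowing the optimizations is left to
 * the DM via dc_allow_idle_optimizations(dc, true).
 */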
4844
4845 bool dc_dmub_is_ips_idle_state(struct dc *dc)
4846 {
4847         uint32_t idle_state = 0;
4848
4849         if (dc->debug.disable_idle_power_optimizations)
4850                 return false;
4851
4852         if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
4853                 return false;
4854
4855         if (dc->hwss.get_idle_state)
4856                 idle_state = dc->hwss.get_idle_state(dc);
4857
4858         if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
4859                 !(idle_state & DMUB_IPS2_ALLOW_MASK))
4860                 return true;
4861
4862         return false;
4863 }
4864
4865 /* set min and max memory clock to lowest and highest DPM level, respectively */
4866 void dc_unlock_memory_clock_frequency(struct dc *dc)
4867 {
4868         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4869                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4870
4871         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4872                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4873 }
4874
4875 /* set min memory clock to the min required for current mode, max to maxDPM */
4876 void dc_lock_memory_clock_frequency(struct dc *dc)
4877 {
4878         if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4879                 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4880
4881         if (dc->clk_mgr->funcs->set_hard_min_memclk)
4882                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4883
4884         if (dc->clk_mgr->funcs->set_hard_max_memclk)
4885                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4886 }
4887
4888 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4889 {
4890         struct dc_state *context = dc->current_state;
4891         struct hubp *hubp;
4892         struct pipe_ctx *pipe;
4893         int i;
4894
4895         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4896                 pipe = &context->res_ctx.pipe_ctx[i];
4897
4898                 if (pipe->stream != NULL) {
4899                         dc->hwss.disable_pixel_data(dc, pipe, true);
4900
4901                         // wait for double buffer
4902                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4903                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4904                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4905
4906                         hubp = pipe->plane_res.hubp;
4907                         hubp->funcs->set_blank_regs(hubp, true);
4908                 }
4909         }
4910
4911         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4912         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4913
4914         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4915                 pipe = &context->res_ctx.pipe_ctx[i];
4916
4917                 if (pipe->stream != NULL) {
4918                         dc->hwss.disable_pixel_data(dc, pipe, false);
4919
4920                         hubp = pipe->plane_res.hubp;
4921                         hubp->funcs->set_blank_regs(hubp, false);
4922                 }
4923         }
4924 }
4925
4926
4927 /**
4928  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4929  * @dc: pointer to dc of the dm calling this
4930  * @enable: True = transition to DC mode, false = transition back to AC mode
4931  *
4932  * Some SoCs define additional clock limits when in DC mode; the DM should
4933  * invoke this function when the platform undergoes a power source transition
4934  * so DC can apply/unapply the limit. This interface may be disruptive to
4935  * the onscreen content.
4936  *
4937  * Context: Triggered by OS through DM interface, or manually by escape calls.
4938  * Need to hold a dclock when doing so.
4939  *
4940  * Return: none (void function)
4941  *
4942  */
4943 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4944 {
4945         unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
4946         bool p_state_change_support;
4947
4948         if (!dc->config.dc_mode_clk_limit_support)
4949                 return;
4950
4951         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4952         for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
4953                 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
4954                         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
4955         }
4956         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4957         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4958
4959         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4960                 if (p_state_change_support) {
4961                         if (funcMin <= softMax)
4962                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4963                         // else: No-Op
4964                 } else {
4965                         if (funcMin <= softMax)
4966                                 blank_and_force_memclk(dc, true, softMax);
4967                         // else: No-Op
4968                 }
4969         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4970                 if (p_state_change_support) {
4971                         if (funcMin <= softMax)
4972                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4973                         // else: No-Op
4974                 } else {
4975                         if (funcMin <= softMax)
4976                                 blank_and_force_memclk(dc, true, maxDPM);
4977                         // else: No-Op
4978                 }
4979         }
4980         dc->clk_mgr->dc_mode_softmax_enabled = enable;
4981 }
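
/* A hedged example of the expected trigger for the function above (the event
 * hook is DM/platform specific; now_on_battery is named only for
 * illustration):
 *
 *	// in the DM's power-source-change handler:
 *	dc_enable_dcmode_clk_limit(dc, now_on_battery);
 */
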
4982 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4983                 struct dc_cursor_attributes *cursor_attr)
4984 {
4985         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4986                 return true;
4987         return false;
4988 }
4989
4990 /* cleanup on driver unload */
4991 void dc_hardware_release(struct dc *dc)
4992 {
4993         dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4994
4995         if (dc->hwss.hardware_release)
4996                 dc->hwss.hardware_release(dc);
4997 }
4998
4999 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5000 {
5001         if (dc->current_state)
5002                 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5003 }
5004
5005 /**
5006  * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5007  *
5008  * @dc: [in] dc structure
5009  *
5010  * Checks whether the DMUB FW supports outbox notifications; if supported, the
5011  * DM should register the outbox interrupt prior to actually enabling interrupts
5012  * via dc_enable_dmub_outbox.
5013  *
5014  * Return:
5015  * True if DMUB FW supports outbox notifications, False otherwise
5016  */
5017 bool dc_is_dmub_outbox_supported(struct dc *dc)
5018 {
5019         switch (dc->ctx->asic_id.chip_family) {
5020
5021         case FAMILY_YELLOW_CARP:
5022                 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5023                 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5024                     !dc->debug.dpia_debug.bits.disable_dpia)
5025                         return true;
5026                 break;
5027
5028         case AMDGPU_FAMILY_GC_11_0_1:
5029         case AMDGPU_FAMILY_GC_11_5_0:
5030                 if (!dc->debug.dpia_debug.bits.disable_dpia)
5031                         return true;
5032                 break;
5033
5034         default:
5035                 break;
5036         }
5037
5038         /* dmub aux needs dmub notifications to be enabled */
5039         return dc->debug.enable_dmub_aux_for_legacy_ddc;
5040
5041 }
5042
5043 /**
5044  * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
5045  *
5046  * @dc: [in] dc structure
5047  *
5048  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5049  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
5050  * API shall be removed after switching.
5051  *
5052  * Return:
5053  * True if DMUB FW supports outbox notifications, False otherwise
5054  */
5055 bool dc_enable_dmub_notifications(struct dc *dc)
5056 {
5057         return dc_is_dmub_outbox_supported(dc);
5058 }
5059
5060 /**
5061  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5062  *
5063  * @dc: [in] dc structure
5064  *
5065  * Enables DMUB unsolicited notifications to x86 via outbox.
5066  */
5067 void dc_enable_dmub_outbox(struct dc *dc)
5068 {
5069         struct dc_context *dc_ctx = dc->ctx;
5070
5071         dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5072         DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5073 }
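
/*
 * Illustrative DM init-time sequence (sketch, not part of the driver): outbox
 * support must be checked first, and the DM must hook up its outbox interrupt
 * before notifications are enabled, or early notifications could be missed.
 * The function name is hypothetical; IRQ registration itself is DM-specific.
 */
static void example_dm_setup_dmub_outbox(struct dc *dc)
{
        if (!dc_is_dmub_outbox_supported(dc))
                return;

        /* DM registers its outbox interrupt handler here (DM-specific) */

        dc_enable_dmub_outbox(dc);
}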
5074
5075 /**
5076  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5077  *                                      Sets port index appropriately for legacy DDC
5078  * @dc: dc structure
5079  * @link_index: link index
5080  * @payload: aux payload
5081  *
5082  * Return: True if successful, False otherwise
5083  */
5084 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5085                                 uint32_t link_index,
5086                                 struct aux_payload *payload)
5087 {
5088         uint8_t action;
5089         union dmub_rb_cmd cmd = {0};
5090
5091         ASSERT(payload->length <= 16);
5092
5093         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5094         cmd.dp_aux_access.header.payload_bytes = 0;
5095         /* For dpia, ddc_pin is set to NULL */
5096         if (!dc->links[link_index]->ddc->ddc_pin)
5097                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5098         else
5099                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5100
5101         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5102         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5103         cmd.dp_aux_access.aux_control.timeout = 0;
5104         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5105         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5106         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5107
5108         /* set aux action */
5109         if (payload->i2c_over_aux) {
5110                 if (payload->write) {
5111                         if (payload->mot)
5112                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5113                         else
5114                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5115                 } else {
5116                         if (payload->mot)
5117                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5118                         else
5119                                 action = DP_AUX_REQ_ACTION_I2C_READ;
5120                 }
5121         } else {
5122                 if (payload->write)
5123                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5124                 else
5125                         action = DP_AUX_REQ_ACTION_DPCD_READ;
5126         }
5127
5128         cmd.dp_aux_access.aux_control.dpaux.action = action;
5129
5130         if (payload->length && payload->write) {
5131                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5132                         payload->data,
5133                         payload->length
5134                         );
5135         }
5136
5137         /* propagate the submission status to the caller, as documented */
5138         return dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
5139                         DM_DMUB_WAIT_TYPE_WAIT);
5140 }
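
/*
 * Illustrative sketch (not part of the driver): submitting a one-byte native
 * DPCD read at address 0x200 (DP_SINK_COUNT) through the async DMUB aux path.
 * The function name is hypothetical; unset payload fields stay zero.
 */
static bool example_async_dpcd_read_sink_count(struct dc *dc, uint32_t link_index)
{
        struct aux_payload payload = {
                .i2c_over_aux = false,  /* native aux -> DPCD_READ action */
                .write = false,
                .mot = false,
                .address = 0x200,       /* DP_SINK_COUNT */
                .length = 1,
        };

        /* read data arrives later in an outbox AUX reply notification */
        return dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
}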
5141
5142 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5143                                             uint8_t dpia_port_index)
5144 {
5145         uint8_t index, link_index = 0xFF;
5146
5147         for (index = 0; index < dc->link_count; index++) {
5148                 /* ddc_hw_inst has dpia port index for dpia links
5149                  * and ddc instance for legacy links
5150                  */
5151                 if (!dc->links[index]->ddc->ddc_pin) {
5152                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5153                                 link_index = index;
5154                                 break;
5155                         }
5156                 }
5157         }
5158         ASSERT(link_index != 0xFF);
5159         return link_index;
5160 }
5161
5162 /**
5163  * dc_process_dmub_set_config_async - Submits set_config command
5164  *
5165  * @dc: [in] dc structure
5166  * @link_index: [in] link index
5167  * @payload: [in] aux payload
5168  * @notify: [out] set_config immediate reply
5169  *
5170  * Submits set_config command to dmub via inbox message.
5171  *
5172  * Return:
5173  * True if successful, False if failure
5174  */
5175 bool dc_process_dmub_set_config_async(struct dc *dc,
5176                                 uint32_t link_index,
5177                                 struct set_config_cmd_payload *payload,
5178                                 struct dmub_notification *notify)
5179 {
5180         union dmub_rb_cmd cmd = {0};
5181         bool is_cmd_complete = true;
5182
5183         /* prepare SET_CONFIG command */
5184         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5185         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5186
5187         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5188         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5189         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5190
5191         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5192                 /* command is not processed by dmub */
5193                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5194                 return is_cmd_complete;
5195         }
5196
5197         /* command processed by dmub, if ret_status is 1, it is completed instantly */
5198         if (cmd.set_config_access.header.ret_status == 1)
5199                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5200         else
5201                 /* cmd pending, will receive notification via outbox */
5202                 is_cmd_complete = false;
5203
5204         return is_cmd_complete;
5205 }
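
/*
 * Illustrative sketch (not part of the driver): distinguishing an immediate
 * SET_CONFIG reply from a deferred one. The helper name is hypothetical;
 * payload setup and the deferred-notification path are DM-specific.
 */
static void example_dm_send_set_config(struct dc *dc, uint32_t link_index,
                                struct set_config_cmd_payload *payload)
{
        struct dmub_notification notify = {0};

        if (dc_process_dmub_set_config_async(dc, link_index, payload, &notify)) {
                /* completed instantly: notify.sc_status already holds the reply */
                return;
        }

        /* pending: the final status arrives via an outbox notification */
}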
5206
5207 /**
5208  * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5209  *
5210  * @dc: [in] dc structure
5211  * @link_index: [in] link index
5212  * @mst_alloc_slots: [in] mst slots to be allotted
5213  * @mst_slots_in_use: [out] mst slots in use returned in failure case
5214  *
5215  * Submits mst slot allocation command to dmub via inbox message
5216  *
5217  * Return:
5218  * DC_OK if successful, DC_ERROR if failure
5219  */
5220 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5221                                 uint32_t link_index,
5222                                 uint8_t mst_alloc_slots,
5223                                 uint8_t *mst_slots_in_use)
5224 {
5225         union dmub_rb_cmd cmd = {0};
5226
5227         /* prepare MST_ALLOC_SLOTS command */
5228         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5229         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5230
5231         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5232         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5233
5234         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5235                 /* command is not processed by dmub */
5236                 return DC_ERROR_UNEXPECTED;
5237
5238         /* command processed by dmub; ret_status must be 1, anything else is an error */
5239         if (cmd.set_mst_alloc_slots.header.ret_status != 1)
5240                 /* command processing error */
5241                 return DC_ERROR_UNEXPECTED;
5242
5243         /* command processed and we have a status of 2, mst not enabled in dpia */
5244         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5245                 return DC_FAIL_UNSUPPORTED_1;
5246
5247         /* previously configured mst alloc and used slots did not match */
5248         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5249                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5250                 return DC_NOT_SUPPORTED;
5251         }
5252
5253         return DC_OK;
5254 }
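
/*
 * Illustrative sketch (not part of the driver): interpreting the status codes
 * of dc_process_dmub_set_mst_slots(). The helper name and the way the caller
 * reacts to each status are hypothetical.
 */
static bool example_dm_alloc_mst_slots(const struct dc *dc, uint32_t link_index,
                                uint8_t slots)
{
        uint8_t slots_in_use = 0;
        enum dc_status status;

        status = dc_process_dmub_set_mst_slots(dc, link_index, slots,
                                                &slots_in_use);

        if (status == DC_FAIL_UNSUPPORTED_1)
                return false;   /* MST not enabled in this DPIA */

        if (status == DC_NOT_SUPPORTED) {
                /* mismatch: slots_in_use reports what is currently allocated */
                return false;
        }

        return status == DC_OK;
}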
5255
5256 /**
5257  * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5258  *
5259  * @dc: [in] dc structure
5260  * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5261  *
5262  * Submits dpia hpd int enable command to dmub via inbox message
5263  */
5264 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5265                                 uint32_t hpd_int_enable)
5266 {
5267         union dmub_rb_cmd cmd = {0};
5268
5269         cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5270         cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5271
5272         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5273
5274         DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5275 }
5276
5277 /**
5278  * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5279  *
5280  * @dc: [in] dc structure
5281  */
5284 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5285 {
5286         dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5287 }
5288
5289 /**
5290  * dc_disable_accelerated_mode - disable accelerated mode
5291  * @dc: dc structure
5292  */
5293 void dc_disable_accelerated_mode(struct dc *dc)
5294 {
5295         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5296 }
5297
5298
5299 /**
5300  * dc_notify_vsync_int_state - notifies vsync enable/disable state
5301  * @dc: dc structure
5302  * @stream: stream where vsync int state changed
5303  * @enable: whether vsync is enabled or disabled
5304  *
5305  * Called when vsync is enabled or disabled. Will notify DMUB to start/stop
5306  * ABM interrupts after steady state is reached.
5307  */
5308 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5309 {
5310         int i;
5311         int edp_num;
5312         struct pipe_ctx *pipe = NULL;
5313         struct dc_link *link = stream->sink->link;
5314         struct dc_link *edp_links[MAX_NUM_EDP];
5315
5316
5317         if (link->psr_settings.psr_feature_enabled)
5318                 return;
5319
5320         if (link->replay_settings.replay_feature_enabled)
5321                 return;
5322
5323         /* find primary pipe associated with stream */
5324         for (i = 0; i < MAX_PIPES; i++) {
5325                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5326
5327                 if (pipe->stream == stream && pipe->stream_res.tg)
5328                         break;
5329         }
5330
5331         if (i == MAX_PIPES) {
5332                 ASSERT(0);
5333                 return;
5334         }
5335
5336         dc_get_edp_links(dc, edp_links, &edp_num);
5337
5338         /* Determine panel inst */
5339         for (i = 0; i < edp_num; i++) {
5340                 if (edp_links[i] == link)
5341                         break;
5342         }
5343
5344         if (i == edp_num)
5345                 return;
5347
5348         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5349                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5350 }
5351
5352 /**
5353  * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause ABM
5354  * @dc: dc structure
5355  * @stream: stream whose ABM hw state is to be saved or restored
5356  * @pData: abm hw states
5357  *
5358  * Return: True on success, False otherwise
5359  */
5360 bool dc_abm_save_restore(
5361                 struct dc *dc,
5362                 struct dc_stream_state *stream,
5363                 struct abm_save_restore *pData)
5364 {
5365         int i;
5366         int edp_num;
5367         struct pipe_ctx *pipe = NULL;
5368         struct dc_link *link = stream->sink->link;
5369         struct dc_link *edp_links[MAX_NUM_EDP];
5370
5371         if (link->replay_settings.replay_feature_enabled)
5372                 return false;
5373
5374         /* find primary pipe associated with stream */
5375         for (i = 0; i < MAX_PIPES; i++) {
5376                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5377
5378                 if (pipe->stream == stream && pipe->stream_res.tg)
5379                         break;
5380         }
5381
5382         if (i == MAX_PIPES) {
5383                 ASSERT(0);
5384                 return false;
5385         }
5386
5387         dc_get_edp_links(dc, edp_links, &edp_num);
5388
5389         /* Determine panel inst */
5390         for (i = 0; i < edp_num; i++)
5391                 if (edp_links[i] == link)
5392                         break;
5393
5394         if (i == edp_num)
5395                 return false;
5396
5397         if (pipe->stream_res.abm &&
5398                 pipe->stream_res.abm->funcs->save_restore)
5399                 return pipe->stream_res.abm->funcs->save_restore(
5400                                 pipe->stream_res.abm,
5401                                 i,
5402                                 pData);
5403         return false;
5404 }
5405
5406 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5407 {
5408         unsigned int i;
5409         bool subvp_sw_cursor_req = false;
5410
5411         for (i = 0; i < dc->current_state->stream_count; i++) {
5412                 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5413                         subvp_sw_cursor_req = true;
5414                         break;
5415                 }
5416         }
5417         properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5418 }
5419
5420 /**
5421  * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5422  *
5423  * Called when DM wants to power on/off eDP.
5424  * Only works on links that have the skip_implict_edp_power_control flag set.
5425  *
5426  * @dc: Current DC state
5427  * @edp_link: a link with eDP connector signal type
5428  * @powerOn: power on/off eDP
5429  *
5430  * Return: void
5431  */
5432 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5433                                  bool powerOn)
5434 {
5435         if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5436                 return;
5437
5438         if (!edp_link->skip_implict_edp_power_control)
5439                 return;
5440
5441         edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5442 }
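
/*
 * Illustrative sketch (not part of the driver): a DM explicitly powering an
 * eDP panel down. The helper name is hypothetical; the call is silently
 * ignored for non-eDP links and for links without
 * skip_implict_edp_power_control set.
 */
static void example_dm_power_down_edp(const struct dc *dc, struct dc_link *edp_link)
{
        dc_set_edp_power(dc, edp_link, false);
}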
5443
5444 /**
5445  * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5446  * @context: the dc state to extract the power profile from
5447  *
5448  * Called when DM wants to make power policy decisions based on dc_state.
5449  *
5450  * Return: the power profile for the given state
5451  */
5452 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5453 {
5454         struct dc_power_profile profile = { 0 };
5455
5456         profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
5457
5458         return profile;
5459 }
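
/*
 * Illustrative sketch (not part of the driver): power_level is 0 when the
 * state supports P-State switching and 1 when it does not, so a DM can treat
 * a higher value as a more power-hungry configuration. The helper name is
 * hypothetical.
 */
static bool example_dm_state_is_low_power(const struct dc_state *context)
{
        struct dc_power_profile profile = dc_get_power_profile_for_dc_state(context);

        return profile.power_level == 0;
}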
5460