/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/* Private functions */

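/* Raise *original to the new update type if the new type is more invasive. */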
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

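/* Destroy every link created in create_links(). */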
static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (NULL != dc->links[i])
                        dc->link_srv->destroy_link(&dc->links[i]);
        }
}

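/* Count the links that drive internal displays (eDP or flagged internal). */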
static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

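/* Count the streams in @ctx that requested the seamless boot optimization. */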
static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

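/* Create a dc_link for each BIOS connector, each USB4 DPIA port, and each
 * requested virtual link, then record how many links drive internal displays.
 */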
static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        // condition loop on link_count to allow skipping invalid indices
        for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = dc->link_srv->create_link(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = dc->link_srv->create_link(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia;
        unsigned int num_dig_link_enc;
        int i;

        if (!dc->res_pool)
                return;

        num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

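/* Program an extended VTOTAL range on the pipe driving @stream, using the
 * long-vtotal hardware sequencer hook when the DC version provides one.
 */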
static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
        int i;

        if (!dc || !stream || !adjust)
                return false;

        if (!dc->current_state)
                return false;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        if (dc->hwss.set_long_vtotal)
                                dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

                        return true;
                }
        }

        return false;
}

/**
 *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 *  @dc:     dc reference
 *  @stream: Initial dc stream state
 *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 *  Looks up the pipe context of dc_stream_state and updates the
 *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 *  Rate, which is a power-saving feature that targets reducing panel
 *  refresh rate while the screen is static.
 *
 *  Return: %true if the pipe context is found and adjusted;
 *          %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;

        /*
         * Don't adjust DRR while there are bandwidth optimizations pending, to
         * avoid conflicting with firmware updates.
         */
        if (dc->ctx->dce_version > DCE_VERSION_MAX)
                if (dc->optimized_required || dc->wm_optimized_required)
                        return false;

        dc_exit_ips_for_hw_access(dc);

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;
        stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

        if (dc->caps.max_v_total != 0 &&
                (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
                if (adjust->allow_otg_v_count_halt)
                        return set_long_vtotal(dc, stream, adjust);
                else
                        return false;
        }

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        return true;
                }
        }
        return false;
}


/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;

        int i = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}

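/* Report the current and nominal vertical counts of the CRTC driving
 * streams[0] via @v_pos and @nom_v_pos; returns false if no matching pipe
 * with a stream encoder is found.
 */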
bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        union dmub_rb_cmd cmd = {0};

        cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
        cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

        if (is_stop) {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
                cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
                cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
                cmd.secure_display.roi_info.x_start = rect->x;
                cmd.secure_display.roi_info.y_start = rect->y;
                cmd.secure_display.roi_info.x_end = rect->x + rect->width;
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }

        dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
                struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
        if (is_stop)
                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        else
                dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
                struct rect *rect, bool is_stop)
{
        struct dmcu *dmcu;
        struct dc_dmub_srv *dmub_srv;
        struct otg_phy_mux mux_mapping;
        struct pipe_ctx *pipe;
        int i;
        struct dc *dc = stream->ctx->dc;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
        mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

        dmcu = dc->res_pool->dmcu;
        dmub_srv = dc->ctx->dmub_srv;

        /* forward to dmub */
        if (dmub_srv)
                dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
        /* forward to dmcu */
        else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
                dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
        else
                return false;

        return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        pipe = resource_get_otg_master_for_stream(
                        &dc->current_state->res_ctx, stream);

        /* Stream not found */
        if (pipe == NULL)
                return false;

        dc_exit_ips_for_hw_access(dc);

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
        param.odm_mode = pipe->next_odm_pipe ? 1:0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        dc_exit_ips_for_hw_access(stream->ctx->dc);

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        dc_exit_ips_for_hw_access(dc);

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

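/* Tear down everything dc_construct() created, roughly in reverse order of
 * creation.
 */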
static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_state_release(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->link_srv)
                link_destroy_link_service(&dc->link_srv);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        kfree(dc->ctx->logger);
        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

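/* Allocate and populate the dc_context (logger, perf trace, link service)
 * from the init parameters; this is the only construction step needed for
 * the virtual-HW environment.
 */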
static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;
        dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
        dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
        dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

        /* Create logger */
        dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

        if (!dc_ctx->logger) {
                kfree(dc_ctx);
                return false;
        }

        dc_ctx->logger->dev = adev_to_drm(init_params->driver);
        dc->dml.logger = dc_ctx->logger;

        dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                kfree(dc_ctx->logger);
                kfree(dc_ctx);
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        dc->link_srv = link_create_link_service();
        if (!dc->link_srv)
                return false;

        return true;
}

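/* Full construction path for real hardware: bandwidth calculation structs,
 * context, BIOS parser, GPIO service, resource pool, clock manager, links,
 * and the initial current_state.
 */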
static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx__resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
        if (dc->caps.max_optimizable_video_width == 0)
                dc->caps.max_optimizable_video_width = 5120;
        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */
        dc->current_state = dc_state_create(dc, NULL);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        return true;

fail:
        return false;
}

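/* Mark every writeback pipe on @stream as disabled so the next programming
 * pass tears them down.
 */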
static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
                                          struct dc_state *context,
                                          struct dc_stream_state *stream,
                                          bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

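/* Recompute the debug visual-confirm border color for @pipe_ctx based on the
 * active dc->debug.visual_confirm mode and the DCN generation.
 */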
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
        if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
                memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

                if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
                        get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                        get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
                        get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                else {
                        if (dc->ctx->dce_version < DCN_VERSION_2_0)
                                color_space_to_black_color(
                                        dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
                }
                if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
                        if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
                                get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
                                get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
                                get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                }
        }
}

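/* Disable planes and streams that are still programmed in hardware but no
 * longer exist in the new @context, then make the cleaned-up copy the
 * current state.
 */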
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_state_create_current_copy(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        if (dangling_context == NULL)
                return;

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change = false;

                if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
                        (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
                else
                        pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
                                dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change &&
                                dc->current_state->stream_count != context->stream_count)
                        should_disable = true;

                if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
                                !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
                        struct pipe_ctx *old_pipe, *new_pipe;

                        old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        new_pipe = &context->res_ctx.pipe_ctx[i];

                        if (old_pipe->plane_state && !new_pipe->plane_state)
                                should_disable = true;
                }

                if (should_disable && old_stream) {
                        bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
                         * phantom OTG so the disable programming gets the double buffer
                         * update. Otherwise the pipe will be left in a partially disabled
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
                        if (is_phantom) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;
                                        struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

                                        main_pipe_width = old_paired_stream->dst.width;
                                        main_pipe_height = old_paired_stream->dst.height;
                                        if (dc->hwss.blank_phantom)
                                                dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }

                        if (is_phantom)
                                dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
                        else
                                dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (pipe->stream && pipe->plane_state) {
                                set_p_state_switch_method(dc, context, pipe);
                                dc_update_visual_confirm_color(dc, context, pipe);
                        }

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        /* We need to put the phantom OTG back into its default (disabled) state or we
                         * can get corruption when transitioning from one SubVP config to a different one.
                         * The OTG is set to disable on falling edge of VUPDATE so the plane disable
                         * will still get its double buffer update.
                         */
                        if (is_phantom) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_state_release(current_ctx);
}

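/* If firmware left an eDP link running at a pixel clock that does not match
 * what the new context requests, shut the stream down so it can be re-enabled
 * with the correct timing.
 */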
static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* check if timing changed; if so, disable the stream */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *stream = NULL;
                struct dc_link *link = NULL;
                struct pipe_ctx *pipe = NULL;

                pipe = &context->res_ctx.pipe_ctx[i];
                stream = pipe->stream;
                if (stream == NULL)
                        continue;

                if (stream->apply_seamless_boot_optimization)
                        continue;

                // only looking for first odm pipe
                if (pipe->prev_odm_pipe)
                        continue;

                if (stream->link->local_sink &&
                        stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        link = stream->link;
                }

                if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
                        unsigned int enc_inst, tg_inst = 0;
                        unsigned int pix_clk_100hz = 0;

                        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
                        if (enc_inst != ENGINE_ID_UNKNOWN) {
                                for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
                                        if (dc->res_pool->stream_enc[j]->id == enc_inst) {
                                                tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
                                                        dc->res_pool->stream_enc[j]);
                                                break;
                                        }
                                }

                                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
                                        dc->res_pool->dp_clock_source,
                                        tg_inst, &pix_clk_100hz);

                                if (link->link_status.link_active) {
                                        uint32_t requested_pix_clk_100hz =
                                                pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

                                        if (pix_clk_100hz != requested_pix_clk_100hz) {
                                                dc->link_srv->set_dpms_off(pipe);
                                                pipe->stream->dpms_off = false;
                                        }
                                }
                        }
                }
        }
}

/**
 * wait_for_blank_complete - wait for all active OPPs to finish pending blank
 * pattern updates
 *
 * @dc: [in] dc reference
 * @context: [in] hardware context in use
 */
static void wait_for_blank_complete(struct dc *dc,
                struct dc_state *context)
{
        struct pipe_ctx *opp_head;
        struct dce_hwseq *hws = dc->hwseq;
        int i;

        if (!hws->funcs.wait_for_blank_complete)
                return;

        for (i = 0; i < MAX_PIPES; i++) {
                opp_head = &context->res_ctx.pipe_ctx[i];

                if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
                                dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
                        continue;

                hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
        }
}

static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
{
        struct pipe_ctx *otg_master;
        struct timing_generator *tg;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                otg_master = &context->res_ctx.pipe_ctx[i];
                if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
                                dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
                        continue;
                tg = otg_master->stream_res.tg;
                if (tg->funcs->wait_odm_doublebuffer_pending_clear)
                        tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
        }

        /* ODM update may require to reprogram blank pattern for each OPP */
        wait_for_blank_complete(dc, context);
}

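/* Poll (up to 100 ms per pipe) until no non-phantom pipe with a plane still
 * has a flip pending.
 */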
1387 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1388 {
1389         int i;
1390         PERF_TRACE();
1391         for (i = 0; i < MAX_PIPES; i++) {
1392                 int count = 0;
1393                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1394
1395                 if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1396                         continue;
1397
1398                 /* Timeout 100 ms */
1399                 while (count < 100000) {
1400                         /* Must clear to start with, since the update function ORs in HW status */
1401                         pipe->plane_state->status.is_flip_pending = false;
1402                         dc->hwss.update_pending_status(pipe);
1403                         if (!pipe->plane_state->status.is_flip_pending)
1404                                 break;
1405                         udelay(1);
1406                         count++;
1407                 }
1408                 ASSERT(!pipe->plane_state->status.is_flip_pending);
1409         }
1410         PERF_TRACE();
1411 }
1412
1413 /* Public functions */
1414
1415 struct dc *dc_create(const struct dc_init_data *init_params)
1416 {
1417         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1418         unsigned int full_pipe_count;
1419
1420         if (!dc)
1421                 return NULL;
1422
1423         if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1424                 if (!dc_construct_ctx(dc, init_params))
1425                         goto destruct_dc;
1426         } else {
1427                 if (!dc_construct(dc, init_params))
1428                         goto destruct_dc;
1429
1430                 full_pipe_count = dc->res_pool->pipe_count;
1431                 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1432                         full_pipe_count--;
1433                 dc->caps.max_streams = min(
1434                                 full_pipe_count,
1435                                 dc->res_pool->stream_enc_count);
1436
1437                 dc->caps.max_links = dc->link_count;
1438                 dc->caps.max_audios = dc->res_pool->audio_count;
1439                 dc->caps.linear_pitch_alignment = 64;
1440
1441                 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1442
1443                 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1444
1445                 if (dc->res_pool->dmcu != NULL)
1446                         dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1447         }
1448
1449         dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1450         dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1451         dc->clk_reg_offsets = init_params->clk_reg_offsets;
1452
1453         /* Populate versioning information */
1454         dc->versions.dc_ver = DC_VER;
1455
1456         dc->build_id = DC_BUILD_ID;
1457
1458         DC_LOG_DC("Display Core initialized\n");
1459
1460
1462         return dc;
1463
1464 destruct_dc:
1465         dc_destruct(dc);
1466         kfree(dc);
1467         return NULL;
1468 }
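/* Typical DM-side usage (an illustrative sketch only; the exact set of
 * dc_init_data fields a DM populates varies per ASIC and is an assumption
 * here, not a contract):
 *
 *	struct dc_init_data init_data = { 0 };
 *
 *	init_data.driver = adev;
 *	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
 *
 *	dc = dc_create(&init_data);
 *	if (!dc)
 *		return -ENOMEM;
 *
 * The returned object is later torn down with dc_destroy(&dc), which also
 * NULLs the caller's pointer.
 */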
1469
1470 static void detect_edp_presence(struct dc *dc)
1471 {
1472         struct dc_link *edp_links[MAX_NUM_EDP];
1473         struct dc_link *edp_link = NULL;
1474         enum dc_connection_type type;
1475         int i;
1476         int edp_num;
1477
1478         dc_get_edp_links(dc, edp_links, &edp_num);
1479         if (!edp_num)
1480                 return;
1481
1482         for (i = 0; i < edp_num; i++) {
1483                 edp_link = edp_links[i];
1484                 if (dc->config.edp_not_connected) {
1485                         edp_link->edp_sink_present = false;
1486                 } else {
1487                         dc_link_detect_connection_type(edp_link, &type);
1488                         edp_link->edp_sink_present = (type != dc_connection_none);
1489                 }
1490         }
1491 }
1492
1493 void dc_hardware_init(struct dc *dc)
1494 {
1496         detect_edp_presence(dc);
1497         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1498                 dc->hwss.init_hw(dc);
1499 }
1500
1501 void dc_init_callbacks(struct dc *dc,
1502                 const struct dc_callback_init *init_params)
1503 {
1504         dc->ctx->cp_psp = init_params->cp_psp;
1505 }
1506
1507 void dc_deinit_callbacks(struct dc *dc)
1508 {
1509         memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1510 }
1511
1512 void dc_destroy(struct dc **dc)
1513 {
1514         dc_destruct(*dc);
1515         kfree(*dc);
1516         *dc = NULL;
1517 }
1518
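/* Arm per-frame CRTC position resets for every pipe whose stream has
 * triggered_crtc_reset enabled and is slaved to another stream's event
 * source, keeping multiple OTGs in lockstep from frame to frame.
 */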
1519 static void enable_timing_multisync(
1520                 struct dc *dc,
1521                 struct dc_state *ctx)
1522 {
1523         int i, multisync_count = 0;
1524         int pipe_count = dc->res_pool->pipe_count;
1525         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1526
1527         for (i = 0; i < pipe_count; i++) {
1528                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1529                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1530                         continue;
1531                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1532                         continue;
1533                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1534                 multisync_count++;
1535         }
1536
1537         if (multisync_count > 0) {
1538                 dc->hwss.enable_per_frame_crtc_position_reset(
1539                         dc, multisync_count, multisync_pipes);
1540         }
1541 }
1542
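/* Group OTG masters that drive identical (or at least vblank-alignable)
 * timings and synchronize each group in hardware:
 *
 * 1. collect every pipe that owns a stream (no top_pipe/prev_odm_pipe);
 * 2. grow a group around each remaining pipe, using full timing sync or
 *    vblank alignment, whichever the first matching pipe allows;
 * 3. promote the first unblanked pipe in the group to group master;
 * 4. drop pipes that are already synced, then call either
 *    enable_timing_synchronization or enable_vblanks_synchronization.
 */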
1543 static void program_timing_sync(
1544                 struct dc *dc,
1545                 struct dc_state *ctx)
1546 {
1547         int i, j, k;
1548         int group_index = 0;
1549         int num_group = 0;
1550         int pipe_count = dc->res_pool->pipe_count;
1551         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1552
1553         for (i = 0; i < pipe_count; i++) {
1554                 if (!ctx->res_ctx.pipe_ctx[i].stream
1555                                 || ctx->res_ctx.pipe_ctx[i].top_pipe
1556                                 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1557                         continue;
1558
1559                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1560         }
1561
1562         for (i = 0; i < pipe_count; i++) {
1563                 int group_size = 1;
1564                 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1565                 struct pipe_ctx *pipe_set[MAX_PIPES];
1566
1567                 if (!unsynced_pipes[i])
1568                         continue;
1569
1570                 pipe_set[0] = unsynced_pipes[i];
1571                 unsynced_pipes[i] = NULL;
1572
1573                 /* Add this tg to the set, then search the remaining tgs for
1574                  * ones with the same timing and add those to the group as well.
1575                  */
1576                 for (j = i + 1; j < pipe_count; j++) {
1577                         if (!unsynced_pipes[j])
1578                                 continue;
1579                         if (sync_type != TIMING_SYNCHRONIZABLE &&
1580                                 dc->hwss.enable_vblanks_synchronization &&
1581                                 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1582                                 resource_are_vblanks_synchronizable(
1583                                         unsynced_pipes[j]->stream,
1584                                         pipe_set[0]->stream)) {
1585                                 sync_type = VBLANK_SYNCHRONIZABLE;
1586                                 pipe_set[group_size] = unsynced_pipes[j];
1587                                 unsynced_pipes[j] = NULL;
1588                                 group_size++;
1589                         } else if (sync_type != VBLANK_SYNCHRONIZABLE &&
1591                                 resource_are_streams_timing_synchronizable(
1592                                         unsynced_pipes[j]->stream,
1593                                         pipe_set[0]->stream)) {
1594                                 sync_type = TIMING_SYNCHRONIZABLE;
1595                                 pipe_set[group_size] = unsynced_pipes[j];
1596                                 unsynced_pipes[j] = NULL;
1597                                 group_size++;
1598                         }
1599                 }
1600
1601                 /* set first unblanked pipe as master */
1602                 for (j = 0; j < group_size; j++) {
1603                         bool is_blanked;
1604
1605                         if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1606                                 is_blanked =
1607                                         pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1608                         else
1609                                 is_blanked =
1610                                         pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1611                         if (!is_blanked) {
1612                                 if (j == 0)
1613                                         break;
1614
1615                                 swap(pipe_set[0], pipe_set[j]);
1616                                 break;
1617                         }
1618                 }
1619
1620                 for (k = 0; k < group_size; k++) {
1621                         struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
1622
1623                         status->timing_sync_info.group_id = num_group;
1624                         status->timing_sync_info.group_size = group_size;
1625                         status->timing_sync_info.master = (k == 0);
1630                 }
1631
1632                 /* remove any other unblanked pipes as they have already been synced */
1633                 if (dc->config.use_pipe_ctx_sync_logic) {
1634                         /* check pipe's syncd to decide which pipe to be removed */
1635                         for (j = 1; j < group_size; j++) {
1636                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1637                                         group_size--;
1638                                         pipe_set[j] = pipe_set[group_size];
1639                                         j--;
1640                                 } else
1641                                         /* link the slave pipe's syncd to the master pipe's */
1642                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1643                         }
1644                 } else {
1645                         /* remove any other unblanked pipes, as they have already been synced */
1646                         for (j = j + 1; j < group_size; j++) {
1647                                 bool is_blanked;
1648
1649                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1650                                         is_blanked =
1651                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1652                                 else
1653                                         is_blanked =
1654                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1655                                 if (!is_blanked) {
1656                                         group_size--;
1657                                         pipe_set[j] = pipe_set[group_size];
1658                                         j--;
1659                                 }
1660                         }
1661                 }
1662
1663                 if (group_size > 1) {
1664                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1665                                 dc->hwss.enable_timing_synchronization(
1666                                         dc, ctx, group_index, group_size, pipe_set);
1667                         } else if (sync_type == VBLANK_SYNCHRONIZABLE) {
1668                                 dc->hwss.enable_vblanks_synchronization(
1669                                         dc, group_index, group_size, pipe_set);
1670                         }
1672                         group_index++;
1673                 }
1674                 num_group++;
1675         }
1676 }
1677
1678 static bool streams_changed(struct dc *dc,
1679                             struct dc_stream_state *streams[],
1680                             uint8_t stream_count)
1681 {
1682         uint8_t i;
1683
1684         if (stream_count != dc->current_state->stream_count)
1685                 return true;
1686
1687         for (i = 0; i < dc->current_state->stream_count; i++) {
1688                 if (dc->current_state->streams[i] != streams[i])
1689                         return true;
1690                 if (!streams[i]->link->link_state_valid)
1691                         return true;
1692         }
1693
1694         return false;
1695 }
1696
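/**
 * dc_validate_boot_timing - Check whether the timing lit up by firmware at
 * boot can be adopted seamlessly
 *
 * @dc: DC object
 * @sink: sink the boot display is attached to
 * @crtc_timing: timing the OS intends to drive
 *
 * Compares @crtc_timing against what the currently enabled DIG/OTG pair is
 * already scanning out. Only if the hardware state matches exactly can the
 * first mode set skip blanking the display and retraining the link.
 *
 * Return: true if the boot timing matches and seamless takeover is safe.
 */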
1697 bool dc_validate_boot_timing(const struct dc *dc,
1698                                 const struct dc_sink *sink,
1699                                 struct dc_crtc_timing *crtc_timing)
1700 {
1701         struct timing_generator *tg;
1702         struct stream_encoder *se = NULL;
1703
1704         struct dc_crtc_timing hw_crtc_timing = {0};
1705
1706         struct dc_link *link = sink->link;
1707         unsigned int i, enc_inst, tg_inst = 0;
1708
1709         /* Support seamless boot on eDP displays only */
1710         if (sink->sink_signal != SIGNAL_TYPE_EDP)
1711                 return false;
1713
1714         if (dc->debug.force_odm_combine)
1715                 return false;
1716
1717         /* Check for enabled DIG to identify enabled display */
1718         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1719                 return false;
1720
1721         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1722
1723         if (enc_inst == ENGINE_ID_UNKNOWN)
1724                 return false;
1725
1726         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1727                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1728
1729                         se = dc->res_pool->stream_enc[i];
1730
1731                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1732                                 dc->res_pool->stream_enc[i]);
1733                         break;
1734                 }
1735         }
1736
1737         // tg_inst not found
1738         if (i == dc->res_pool->stream_enc_count)
1739                 return false;
1740
1741         if (tg_inst >= dc->res_pool->timing_generator_count)
1742                 return false;
1743
1744         if (tg_inst != link->link_enc->preferred_engine)
1745                 return false;
1746
1747         tg = dc->res_pool->timing_generators[tg_inst];
1748
1749         if (!tg->funcs->get_hw_timing)
1750                 return false;
1751
1752         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1753                 return false;
1754
1755         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1756                 return false;
1757
1758         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1759                 return false;
1760
1761         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1762                 return false;
1763
1764         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1765                 return false;
1766
1767         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1768                 return false;
1769
1770         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1771                 return false;
1772
1773         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1774                 return false;
1775
1776         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1777                 return false;
1778
1779         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1780                 return false;
1781
1782         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1783                 return false;
1784
1785         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1786                 return false;
1787
1788         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1789                 return false;
1790
1791         /* block DSC for now, as VBIOS does not currently support DSC timings */
1792         if (crtc_timing->flags.DSC)
1793                 return false;
1794
1795         if (dc_is_dp_signal(link->connector_signal)) {
1796                 unsigned int pix_clk_100hz = 0;
1797                 uint32_t numOdmPipes = 1;
1798                 uint32_t id_src[4] = {0};
1799
1800                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1801                         dc->res_pool->dp_clock_source,
1802                         tg_inst, &pix_clk_100hz);
1803
1804                 if (tg->funcs->get_optc_source)
1805                         tg->funcs->get_optc_source(tg,
1806                                                 &numOdmPipes, &id_src[0], &id_src[1]);
1807
1808                 if (numOdmPipes == 2)
1809                         pix_clk_100hz *= 2;
1810                 if (numOdmPipes == 4)
1811                         pix_clk_100hz *= 4;
1812
1813                 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1814                 // slightly due to rounding issues in 10 kHz units.
1815                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1816                         return false;
1817
1818                 if (!se->funcs->dp_get_pixel_format)
1819                         return false;
1820
1821                 if (!se->funcs->dp_get_pixel_format(
1822                         se,
1823                         &hw_crtc_timing.pixel_encoding,
1824                         &hw_crtc_timing.display_color_depth))
1825                         return false;
1826
1827                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1828                         return false;
1829
1830                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1831                         return false;
1832         }
1833
1834         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
1835                 return false;
1837
1838         if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
1839                 return false;
1840
1841         if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1842                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1843                 return false;
1844         }
1845
1846         return true;
1847 }
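/* Illustrative caller-side check (a sketch; the surrounding seamless-boot
 * plumbing in the DM is assumed, not shown):
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */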
1848
1849 static inline bool should_update_pipe_for_stream(
1850                 struct dc_state *context,
1851                 struct pipe_ctx *pipe_ctx,
1852                 struct dc_stream_state *stream)
1853 {
1854         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1855 }
1856
1857 static inline bool should_update_pipe_for_plane(
1858                 struct dc_state *context,
1859                 struct pipe_ctx *pipe_ctx,
1860                 struct dc_plane_state *plane_state)
1861 {
1862         return (pipe_ctx->plane_state == plane_state);
1863 }
1864
1865 void dc_enable_stereo(
1866         struct dc *dc,
1867         struct dc_state *context,
1868         struct dc_stream_state *streams[],
1869         uint8_t stream_count)
1870 {
1871         int i, j;
1872         struct pipe_ctx *pipe;
1873
1874         dc_exit_ips_for_hw_access(dc);
1875
1876         for (i = 0; i < MAX_PIPES; i++) {
1877                 if (context != NULL) {
1878                         pipe = &context->res_ctx.pipe_ctx[i];
1879                 } else {
1880                         context = dc->current_state;
1881                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1882                 }
1883
1884                 for (j = 0; pipe && j < stream_count; j++) {
1885                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1886                                 dc->hwss.setup_stereo)
1887                                 dc->hwss.setup_stereo(pipe, dc);
1888                 }
1889         }
1890 }
1891
1892 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1893 {
1894         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1895                 dc_exit_ips_for_hw_access(dc);
1896
1897                 enable_timing_multisync(dc, context);
1898                 program_timing_sync(dc, context);
1899         }
1900 }
1901
1902 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1903 {
1904         int i;
1905         unsigned int stream_mask = 0;
1906
1907         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1908                 if (context->res_ctx.pipe_ctx[i].stream)
1909                         stream_mask |= 1 << i;
1910         }
1911
1912         return stream_mask;
1913 }
1914
1915 void dc_z10_restore(const struct dc *dc)
1916 {
1917         if (dc->hwss.z10_restore)
1918                 dc->hwss.z10_restore(dc);
1919 }
1920
1921 void dc_z10_save_init(struct dc *dc)
1922 {
1923         if (dc->hwss.z10_save_init)
1924                 dc->hwss.z10_save_init(dc);
1925 }
1926
1927 /**
1928  * dc_commit_state_no_check - Apply context to the hardware
1929  *
1930  * @dc: DC object with the current status to be updated
1931  * @context: New state that will become the current state at the end of this function
1932  *
1933  * Applies the given context to the hardware and copies it into the current context.
1934  * It's up to the user to release the src context afterwards.
1935  *
1936  * Return: an enum dc_status result code for the operation
1937  */
1938 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1939 {
1940         struct dc_bios *dcb = dc->ctx->dc_bios;
1941         enum dc_status result = DC_ERROR_UNEXPECTED;
1942         struct pipe_ctx *pipe;
1943         int i, k, l;
1944         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1945         struct dc_state *old_state;
1946         bool subvp_prev_use = false;
1947
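        /* High-level sequence (each step is elaborated inline below):
         * exit Z10/idle optimizations, prepare bandwidth, take the SubVP
         * lock, program front ends, apply the context to HW, re-sync OTGs,
         * then optimize bandwidth and swap in the new current_state.
         */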
1948         dc_z10_restore(dc);
1949         dc_allow_idle_optimizations(dc, false);
1950
1951         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1952                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1953
1954                 /* Check old context for SubVP */
1955                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
1956                 if (subvp_prev_use)
1957                         break;
1958         }
1959
1960         for (i = 0; i < context->stream_count; i++)
1961                 dc_streams[i] =  context->streams[i];
1962
1963         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1964                 disable_vbios_mode_if_required(dc, context);
1965                 dc->hwss.enable_accelerated_mode(dc, context);
1966         }
1967
1968         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1969                 context->stream_count == 0)
1970                 dc->hwss.prepare_bandwidth(dc, context);
1971
1972         /* When SubVP is active, all HW programming must be done while
1973          * SubVP lock is acquired
1974          */
1975         if (dc->hwss.subvp_pipe_control_lock)
1976                 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1977
1978         if (dc->hwss.update_dsc_pg)
1979                 dc->hwss.update_dsc_pg(dc, context, false);
1980
1981         disable_dangling_plane(dc, context);
1982         /* re-program planes for existing streams, in case we need to
1983          * free up plane resources for later use
1984          */
1985         if (dc->hwss.apply_ctx_for_surface) {
1986                 for (i = 0; i < context->stream_count; i++) {
1987                         if (context->streams[i]->mode_changed)
1988                                 continue;
1989                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1990                         dc->hwss.apply_ctx_for_surface(
1991                                 dc, context->streams[i],
1992                                 context->stream_status[i].plane_count,
1993                                 context); /* use new pipe config in new context */
1994                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1995                         dc->hwss.post_unlock_program_front_end(dc, context);
1996                 }
1997         }
1998
1999         /* Program hardware */
2000         for (i = 0; i < dc->res_pool->pipe_count; i++) {
2001                 pipe = &context->res_ctx.pipe_ctx[i];
2002                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
2003         }
2004
2005         result = dc->hwss.apply_ctx_to_hw(dc, context);
2006
2007         if (result != DC_OK) {
2008                 /* Application of dc_state to hardware stopped. */
2009                 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
2010                 return result;
2011         }
2012
2013         dc_trigger_sync(dc, context);
2014
2015         /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
2016         for (i = 0; i < context->stream_count; i++) {
2017                 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
2018
2019                 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
2020                 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
2021         }
2022
2023         /* Program all planes within the new context */
2024         if (dc->hwss.program_front_end_for_ctx) {
2025                 dc->hwss.interdependent_update_lock(dc, context, true);
2026                 dc->hwss.program_front_end_for_ctx(dc, context);
2027                 dc->hwss.interdependent_update_lock(dc, context, false);
2028                 dc->hwss.post_unlock_program_front_end(dc, context);
2029         }
2030
2031         if (dc->hwss.commit_subvp_config)
2032                 dc->hwss.commit_subvp_config(dc, context);
2033         if (dc->hwss.subvp_pipe_control_lock)
2034                 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
2035
2036         for (i = 0; i < context->stream_count; i++) {
2037                 const struct dc_link *link = context->streams[i]->link;
2038
2039                 if (!context->streams[i]->mode_changed)
2040                         continue;
2041
2042                 if (dc->hwss.apply_ctx_for_surface) {
2043                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
2044                         dc->hwss.apply_ctx_for_surface(
2045                                         dc, context->streams[i],
2046                                         context->stream_status[i].plane_count,
2047                                         context);
2048                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
2049                         dc->hwss.post_unlock_program_front_end(dc, context);
2050                 }
2051
2052                 /*
2053                  * enable stereo
2054                  * TODO rework dc_enable_stereo call to work with validation sets?
2055                  */
2056                 for (k = 0; k < MAX_PIPES; k++) {
2057                         pipe = &context->res_ctx.pipe_ctx[k];
2058
2059                         for (l = 0; pipe && l < context->stream_count; l++) {
2060                                 if (context->streams[l] &&
2061                                         context->streams[l] == pipe->stream &&
2062                                         dc->hwss.setup_stereo)
2063                                         dc->hwss.setup_stereo(pipe, dc);
2064                         }
2065                 }
2066
2067                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
2068                                 context->streams[i]->timing.h_addressable,
2069                                 context->streams[i]->timing.v_addressable,
2070                                 context->streams[i]->timing.h_total,
2071                                 context->streams[i]->timing.v_total,
2072                                 context->streams[i]->timing.pix_clk_100hz / 10);
2073         }
2074
2075         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
2076
2077         if (context->stream_count > get_seamless_boot_stream_count(context) ||
2078                 context->stream_count == 0) {
2079                 /* Must wait until no flips are pending before optimizing bandwidth */
2080                 wait_for_no_pipes_pending(dc, context);
2081                 /*
2082                  * The optimized dispclk depends on the ODM setup; wait for pending
2083                  * ODM updates to complete before optimizing bandwidth.
2084                  */
2085                 wait_for_odm_update_pending_complete(dc, context);
2086                 /* pplib is notified if disp_num changed */
2087                 dc->hwss.optimize_bandwidth(dc, context);
2088                 /* Need to re-sync the OTGs, as an OTG could fall out of sync due
2089                  * to the OTG workaround applied during the clock update
2090                  */
2091                 dc_trigger_sync(dc, context);
2092         }
2093
2094         if (dc->hwss.update_dsc_pg)
2095                 dc->hwss.update_dsc_pg(dc, context, true);
2096
2097         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2098                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2099         else
2100                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2101
2102         context->stream_mask = get_stream_mask(dc, context);
2103
2104         if (context->stream_mask != dc->current_state->stream_mask)
2105                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2106
2107         for (i = 0; i < context->stream_count; i++)
2108                 context->streams[i]->mode_changed = false;
2109
2110         /* Clear update flags that were set earlier to avoid redundant programming */
2111         for (i = 0; i < context->stream_count; i++) {
2112                 context->streams[i]->update_flags.raw = 0x0;
2113         }
2114
2115         old_state = dc->current_state;
2116         dc->current_state = context;
2117
2118         dc_state_release(old_state);
2119
2120         dc_state_retain(dc->current_state);
2121
2122         return result;
2123 }
2124
2125 static bool commit_minimal_transition_state(struct dc *dc,
2126                 struct dc_state *transition_base_context);
2127
2128 /**
2129  * dc_commit_streams - Commit current stream state
2130  *
2131  * @dc: DC object with the commit state to be configured in the hardware
2132  * @params: Parameters for the commit, including the streams to be committed
2133  *
2134  * Function responsible for committing stream changes to the hardware.
2135  *
2136  * Return:
2137  * DC_OK if everything works as expected; otherwise, a dc_status error
2138  * code.
2139  */
2140 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
2141 {
2142         int i, j;
2143         struct dc_state *context;
2144         enum dc_status res = DC_OK;
2145         struct dc_validation_set set[MAX_STREAMS] = {0};
2146         struct pipe_ctx *pipe;
2147         bool handle_exit_odm2to1 = false;
2148
2149         if (!params)
2150                 return DC_ERROR_UNEXPECTED;
2151
2152         if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2153                 return res;
2154
2155         if (!streams_changed(dc, params->streams, params->stream_count) &&
2156                         dc->current_state->power_source == params->power_source)
2157                 return res;
2158
2159         dc_exit_ips_for_hw_access(dc);
2160
2161         DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
2162
2163         for (i = 0; i < params->stream_count; i++) {
2164                 struct dc_stream_state *stream = params->streams[i];
2165                 struct dc_stream_status *status = dc_stream_get_status(stream);
2166
2167                 dc_stream_log(dc, stream);
2168
2169                 set[i].stream = stream;
2170
2171                 if (status) {
2172                         set[i].plane_count = status->plane_count;
2173                         for (j = 0; j < status->plane_count; j++)
2174                                 set[i].plane_states[j] = status->plane_states[j];
2175                 }
2176         }
2177
2178         /* The ODM Combine 2:1 power optimization is only applied in the
2179          * single-stream scenario; it uses more pipes than needed to reduce
2180          * power consumption. We need to switch it off to make room for new streams.
2181          */
2182         if (params->stream_count > dc->current_state->stream_count &&
2183                         dc->current_state->stream_count == 1) {
2184                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2185                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2186                         if (pipe->next_odm_pipe)
2187                                 handle_exit_odm2to1 = true;
2188                 }
2189         }
2190
2191         if (handle_exit_odm2to1)
2192                 res = commit_minimal_transition_state(dc, dc->current_state);
2193
2194         context = dc_state_create_current_copy(dc);
2195         if (!context)
2196                 goto context_alloc_fail;
2197
2198         context->power_source = params->power_source;
2199
2200         res = dc_validate_with_context(dc, set, params->stream_count, context, false);
2201         if (res != DC_OK) {
2202                 BREAK_TO_DEBUGGER();
2203                 goto fail;
2204         }
2205
2206         res = dc_commit_state_no_check(dc, context);
2207
2208         for (i = 0; i < params->stream_count; i++) {
2209                 for (j = 0; j < context->stream_count; j++) {
2210                         if (params->streams[i]->stream_id == context->streams[j]->stream_id)
2211                                 params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2212
2213                         if (dc_is_embedded_signal(params->streams[i]->signal)) {
2214                                 struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
2215
2216                                 if (dc->hwss.is_abm_supported)
2217                                         status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
2218                                 else
2219                                         status->is_abm_supported = true;
2220                         }
2221                 }
2222         }
2223
2224 fail:
2225         dc_state_release(context);
2226
2227 context_alloc_fail:
2228
2229         DC_LOG_DC("%s Finished.\n", __func__);
2230
2231         return res;
2232 }
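/* Minimal call-site sketch (stream array ownership and any DM-side locking
 * are assumed to be handled by the caller; the error print is illustrative):
 *
 *	struct dc_commit_streams_params params = {
 *		.streams = streams,
 *		.stream_count = stream_count,
 *	};
 *
 *	if (dc_commit_streams(dc, &params) != DC_OK)
 *		DC_LOG_ERROR("failed to commit streams\n");
 */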
2233
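/* Acquire (acquire == true) or release a post-blend MPC 3D LUT and shaper
 * pair. On acquire, the MPCC is looked up from the pipe currently driving
 * @stream; on release, no pipe lookup is needed.
 */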
2234 bool dc_acquire_release_mpc_3dlut(
2235                 struct dc *dc, bool acquire,
2236                 struct dc_stream_state *stream,
2237                 struct dc_3dlut **lut,
2238                 struct dc_transfer_func **shaper)
2239 {
2240         int pipe_idx;
2241         bool ret = false;
2242         bool found_pipe_idx = false;
2243         const struct resource_pool *pool = dc->res_pool;
2244         struct resource_context *res_ctx = &dc->current_state->res_ctx;
2245         int mpcc_id = 0;
2246
2247         if (pool && res_ctx) {
2248                 if (acquire) {
2249                         /* find the pipe idx for the given stream */
2250                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2251                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2252                                         found_pipe_idx = true;
2253                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2254                                         break;
2255                                 }
2256                         }
2257                 } else
2258                         found_pipe_idx = true; /* pipe_idx is not required for release */
2259
2260                 if (found_pipe_idx) {
2261                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2262                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2263                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2264                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2265                 }
2266         }
2267         return ret;
2268 }
2269
2270 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2271 {
2272         int i;
2273         struct pipe_ctx *pipe;
2274
2275         for (i = 0; i < MAX_PIPES; i++) {
2276                 pipe = &context->res_ctx.pipe_ctx[i];
2277
2278                 // Don't check flip pending on phantom pipes
2279                 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2280                         continue;
2281
2282                 /* Must clear to start with, since the update function ORs in HW status */
2283                 pipe->plane_state->status.is_flip_pending = false;
2284                 dc->hwss.update_pending_status(pipe);
2285                 if (pipe->plane_state->status.is_flip_pending)
2286                         return true;
2287         }
2288         return false;
2289 }
2290
2291 /* Perform updates here which need to be deferred until the next vupdate.
2292  *
2293  * E.g. the blend LUT, 3D LUT, and shaper LUT bypass registers are double
2294  * buffered, but forcing LUT memory into the shutdown state is immediate.
2295  * This causes single-frame corruption, as the LUT gets disabled mid-frame,
2296  * unless shutdown is deferred until after entering bypass.
2297  */
2298 static void process_deferred_updates(struct dc *dc)
2299 {
2300         int i = 0;
2301
2302         if (dc->debug.enable_mem_low_power.bits.cm) {
2303                 ASSERT(dc->dcn_ip->max_num_dpp);
2304                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2305                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2306                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2307         }
2308 }
2309
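/* Apply the deferred "lower later" optimizations once the previous updates
 * have taken effect: disable planes that no longer have a stream or plane
 * state, run deferred LUT shutdowns, and drop bandwidth/clocks to match the
 * new state. Bails out early while flips are still pending (DCN path only).
 */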
2310 void dc_post_update_surfaces_to_stream(struct dc *dc)
2311 {
2312         int i;
2313         struct dc_state *context = dc->current_state;
2314
2315         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2316                 return;
2317
2318         post_surface_trace(dc);
2319
2320         /*
2321          * Only relevant for DCN behavior where we can guarantee the optimization
2322          * is safe to apply - retain the legacy behavior for DCE.
2323          */
2324
2325         if (dc->ctx->dce_version < DCE_VERSION_MAX)
2326                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2327         else {
2328                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2329
2330                 if (is_flip_pending_in_pipes(dc, context))
2331                         return;
2332
2333                 for (i = 0; i < dc->res_pool->pipe_count; i++)
2334                         if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2335                                         context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2336                                 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2337                                 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2338                         }
2339
2340                 process_deferred_updates(dc);
2341
2342                 dc->hwss.optimize_bandwidth(dc, context);
2343
2344                 if (dc->hwss.update_dsc_pg)
2345                         dc->hwss.update_dsc_pg(dc, context, true);
2346         }
2347
2348         dc->optimized_required = false;
2349         dc->wm_optimized_required = false;
2350 }
2351
2352 bool dc_set_generic_gpio_for_stereo(bool enable,
2353                 struct gpio_service *gpio_service)
2354 {
2355         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2356         struct gpio_pin_info pin_info;
2357         struct gpio *generic;
2358         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2359                            GFP_KERNEL);
2360
2361         if (!config)
2362                 return false;
2363         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2364
2365         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2366                 kfree(config);
2367                 return false;
2368         } else {
2369                 generic = dal_gpio_service_create_generic_mux(
2370                         gpio_service,
2371                         pin_info.offset,
2372                         pin_info.mask);
2373         }
2374
2375         if (!generic) {
2376                 kfree(config);
2377                 return false;
2378         }
2379
2380         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2381
2382         config->enable_output_from_mux = enable;
2383         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2384
2385         if (gpio_result == GPIO_RESULT_OK)
2386                 gpio_result = dal_mux_setup_config(generic, config);
2387
2388         dal_gpio_close(generic);
2389         dal_gpio_destroy_generic_mux(&generic);
2390         kfree(config);
2391
2392         return gpio_result == GPIO_RESULT_OK;
2399 }
2400
2401 static bool is_surface_in_context(
2402                 const struct dc_state *context,
2403                 const struct dc_plane_state *plane_state)
2404 {
2405         int j;
2406
2407         for (j = 0; j < MAX_PIPES; j++) {
2408                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2409
2410                 if (plane_state == pipe_ctx->plane_state) {
2411                         return true;
2412                 }
2413         }
2414
2415         return false;
2416 }
2417
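/* Classify a plane_info delta: changes that alter bandwidth or RQ
 * programming (rotation, format, DCC, non-linear swizzle) force
 * UPDATE_TYPE_FULL, blend-level attribute changes settle for
 * UPDATE_TYPE_MED, and no delta leaves the type at UPDATE_TYPE_FAST.
 */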
2418 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2419 {
2420         union surface_update_flags *update_flags = &u->surface->update_flags;
2421         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2422
2423         if (!u->plane_info)
2424                 return UPDATE_TYPE_FAST;
2425
2426         if (u->plane_info->color_space != u->surface->color_space) {
2427                 update_flags->bits.color_space_change = 1;
2428                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2429         }
2430
2431         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2432                 update_flags->bits.horizontal_mirror_change = 1;
2433                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2434         }
2435
2436         if (u->plane_info->rotation != u->surface->rotation) {
2437                 update_flags->bits.rotation_change = 1;
2438                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2439         }
2440
2441         if (u->plane_info->format != u->surface->format) {
2442                 update_flags->bits.pixel_format_change = 1;
2443                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2444         }
2445
2446         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2447                 update_flags->bits.stereo_format_change = 1;
2448                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2449         }
2450
2451         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2452                 update_flags->bits.per_pixel_alpha_change = 1;
2453                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2454         }
2455
2456         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2457                 update_flags->bits.global_alpha_change = 1;
2458                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2459         }
2460
2461         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2462                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2463                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2464                 /* During DCC on/off, stutter period is calculated before
2465                  * DCC has fully transitioned. This results in incorrect
2466                  * stutter period calculation. Triggering a full update will
2467                  * recalculate stutter period.
2468                  */
2469                 update_flags->bits.dcc_change = 1;
2470                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2471         }
2472
2473         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2474                         resource_pixel_format_to_bpp(u->surface->format)) {
2475                 /* a different number of bytes per element requires a full
2476                  * bandwidth and DML recalculation
2477                  */
2478                 update_flags->bits.bpp_change = 1;
2479                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2480         }
2481
2482         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2483                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2484                 update_flags->bits.plane_size_change = 1;
2485                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2486         }
2487
2488
2489         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2490                         sizeof(union dc_tiling_info)) != 0) {
2491                 update_flags->bits.swizzle_change = 1;
2492                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2493
2494                 /* TODO: the checks below are HW dependent; we should add a
2495                  * hook to the DCE/DCN resource and validate there.
2496                  */
2497                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2498                         /* swizzled mode requires RQ to be set up properly,
2499                          * so we need to run DML to calculate the RQ settings
2500                          */
2501                         update_flags->bits.bandwidth_change = 1;
2502                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2503                 }
2504         }
2505
2506         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2507         return update_type;
2508 }
2509
2510 static enum surface_update_type get_scaling_info_update_type(
2511                 const struct dc *dc,
2512                 const struct dc_surface_update *u)
2513 {
2514         union surface_update_flags *update_flags = &u->surface->update_flags;
2515
2516         if (!u->scaling_info)
2517                 return UPDATE_TYPE_FAST;
2518
2519         if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2520                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2521                         || u->scaling_info->scaling_quality.integer_scaling !=
2522                                 u->surface->scaling_quality.integer_scaling
2523                         ) {
2524                 update_flags->bits.scaling_change = 1;
2525
2526                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2527                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2528                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2529                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2530                         /* Making dst rect smaller requires a bandwidth change */
2531                         update_flags->bits.bandwidth_change = 1;
2532         }
2533
2534         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2535                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2536
2537                 update_flags->bits.scaling_change = 1;
2538                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2539                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2540                         /* Making the src rect bigger requires a clock change */
2541                         update_flags->bits.clock_change = 1;
2542         }
2543
2544         if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2545                 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2546                  u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2547                  /* Changing clip size of a large surface may result in MPC slice count change */
2548                 update_flags->bits.bandwidth_change = 1;
2549
2550         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
2551                         u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
2552                 update_flags->bits.clip_size_change = 1;
2553
2554         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2555                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2556                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2557                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2558                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2559                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2560                 update_flags->bits.position_change = 1;
2561
2562         if (update_flags->bits.clock_change
2563                         || update_flags->bits.bandwidth_change
2564                         || update_flags->bits.scaling_change)
2565                 return UPDATE_TYPE_FULL;
2566
2567         if (update_flags->bits.position_change ||
2568                         update_flags->bits.clip_size_change)
2569                 return UPDATE_TYPE_MED;
2570
2571         return UPDATE_TYPE_FAST;
2572 }
2573
2574 static enum surface_update_type det_surface_update(const struct dc *dc,
2575                 const struct dc_surface_update *u)
2576 {
2577         const struct dc_state *context = dc->current_state;
2578         enum surface_update_type type;
2579         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2580         union surface_update_flags *update_flags = &u->surface->update_flags;
2581
2582         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2583                 update_flags->raw = 0xFFFFFFFF;
2584                 return UPDATE_TYPE_FULL;
2585         }
2586
2587         update_flags->raw = 0; // Reset all flags
2588
2589         type = get_plane_info_update_type(u);
2590         elevate_update_type(&overall_type, type);
2591
2592         type = get_scaling_info_update_type(dc, u);
2593         elevate_update_type(&overall_type, type);
2594
2595         if (u->flip_addr) {
2596                 update_flags->bits.addr_update = 1;
2597                 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2598                         update_flags->bits.tmz_changed = 1;
2599                         elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2600                 }
2601         }
2602         if (u->in_transfer_func)
2603                 update_flags->bits.in_transfer_func_change = 1;
2604
2605         if (u->input_csc_color_matrix)
2606                 update_flags->bits.input_csc_change = 1;
2607
2608         if (u->coeff_reduction_factor)
2609                 update_flags->bits.coeff_reduction_change = 1;
2610
2611         if (u->gamut_remap_matrix)
2612                 update_flags->bits.gamut_remap_change = 1;
2613
2614         if (u->blend_tf)
2615                 update_flags->bits.gamma_change = 1;
2616
2617         if (u->gamma) {
2618                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2619
2620                 if (u->plane_info)
2621                         format = u->plane_info->format;
2622                 else if (u->surface)
2623                         format = u->surface->format;
2624
2625                 if (dce_use_lut(format))
2626                         update_flags->bits.gamma_change = 1;
2627         }
2628
2629         if (u->lut3d_func || u->func_shaper)
2630                 update_flags->bits.lut_3d = 1;
2631
2632         if (u->hdr_mult.value &&
2633                         u->hdr_mult.value != u->surface->hdr_mult.value) {
2634                         update_flags->bits.hdr_mult = 1;
2635                         elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2636                 }
2637
2638         if (update_flags->bits.in_transfer_func_change) {
2639                 type = UPDATE_TYPE_MED;
2640                 elevate_update_type(&overall_type, type);
2641         }
2642
2643         if (update_flags->bits.lut_3d) {
2644                 type = UPDATE_TYPE_FULL;
2645                 elevate_update_type(&overall_type, type);
2646         }
2647
2648         if (dc->debug.enable_legacy_fast_update &&
2649                         (update_flags->bits.gamma_change ||
2650                         update_flags->bits.gamut_remap_change ||
2651                         update_flags->bits.input_csc_change ||
2652                         update_flags->bits.coeff_reduction_change)) {
2653                 type = UPDATE_TYPE_FULL;
2654                 elevate_update_type(&overall_type, type);
2655         }
2656         return overall_type;
2657 }
2658
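/* Fold the stream-level update flags together with each surface's verdict;
 * the most invasive update type seen across the whole set wins.
 */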
2659 static enum surface_update_type check_update_surfaces_for_stream(
2660                 struct dc *dc,
2661                 struct dc_surface_update *updates,
2662                 int surface_count,
2663                 struct dc_stream_update *stream_update,
2664                 const struct dc_stream_status *stream_status)
2665 {
2666         int i;
2667         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2668
2669         if (dc->idle_optimizations_allowed)
2670                 overall_type = UPDATE_TYPE_FULL;
2671
2672         if (stream_status == NULL || stream_status->plane_count != surface_count)
2673                 overall_type = UPDATE_TYPE_FULL;
2674
2675         if (stream_update && stream_update->pending_test_pattern) {
2676                 overall_type = UPDATE_TYPE_FULL;
2677         }
2678
2679         /* some stream updates require passive update */
2680         if (stream_update) {
2681                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2682
2683                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2684                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2685                         stream_update->integer_scaling_update)
2686                         su_flags->bits.scaling = 1;
2687
2688                 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2689                         su_flags->bits.out_tf = 1;
2690
2691                 if (stream_update->abm_level)
2692                         su_flags->bits.abm_level = 1;
2693
2694                 if (stream_update->dpms_off)
2695                         su_flags->bits.dpms_off = 1;
2696
2697                 if (stream_update->gamut_remap)
2698                         su_flags->bits.gamut_remap = 1;
2699
2700                 if (stream_update->wb_update)
2701                         su_flags->bits.wb_update = 1;
2702
2703                 if (stream_update->dsc_config)
2704                         su_flags->bits.dsc_changed = 1;
2705
2706                 if (stream_update->mst_bw_update)
2707                         su_flags->bits.mst_bw = 1;
2708
2709                 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2710                         (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2711                                 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2712                         su_flags->bits.fams_changed = 1;
2713
2714                 if (su_flags->raw != 0)
2715                         overall_type = UPDATE_TYPE_FULL;
2716
2717                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2718                         su_flags->bits.out_csc = 1;
2719
2720                 /* Output transfer function changes do not require bandwidth recalculation,
2721                  * so don't trigger a full update
2722                  */
2723                 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2724                         su_flags->bits.out_tf = 1;
2725         }
2726
2727         for (i = 0 ; i < surface_count; i++) {
2728                 enum surface_update_type type =
2729                                 det_surface_update(dc, &updates[i]);
2730
2731                 elevate_update_type(&overall_type, type);
2732         }
2733
2734         return overall_type;
2735 }
2736
2737 /*
2738  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2739  *
2740  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2741  */
2742 enum surface_update_type dc_check_update_surfaces_for_stream(
2743                 struct dc *dc,
2744                 struct dc_surface_update *updates,
2745                 int surface_count,
2746                 struct dc_stream_update *stream_update,
2747                 const struct dc_stream_status *stream_status)
2748 {
2749         int i;
2750         enum surface_update_type type;
2751
2752         if (stream_update)
2753                 stream_update->stream->update_flags.raw = 0;
2754         for (i = 0; i < surface_count; i++)
2755                 updates[i].surface->update_flags.raw = 0;
2756
2757         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2758         if (type == UPDATE_TYPE_FULL) {
2759                 if (stream_update) {
2760                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2761                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2762                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2763                 }
2764                 for (i = 0; i < surface_count; i++)
2765                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2766         }
2767
2768         if (type == UPDATE_TYPE_FAST) {
2769                 // If there's an available clock comparator, we use that.
2770                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2771                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2772                                 dc->optimized_required = true;
2773                 // Else we fall back to a memory compare.
2774                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2775                         dc->optimized_required = true;
2776                 }
2777
2778                 dc->optimized_required |= dc->wm_optimized_required;
2779         }
2780
2781         return type;
2782 }
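
/*
 * Usage sketch (hypothetical caller such as a DM layer; not part of this
 * file):
 *
 *	enum surface_update_type type = dc_check_update_surfaces_for_stream(
 *			dc, updates, surface_count, stream_update, stream_status);
 *
 *	if (type == UPDATE_TYPE_FULL) {
 *		// a new dc_state must be built and validated before commit
 *	} else {
 *		// FAST/MED updates can be applied to the current state
 *	}
 */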
2783
2784 static struct dc_stream_status *stream_get_status(
2785         struct dc_state *ctx,
2786         struct dc_stream_state *stream)
2787 {
2788         uint8_t i;
2789
2790         for (i = 0; i < ctx->stream_count; i++) {
2791                 if (stream == ctx->streams[i]) {
2792                         return &ctx->stream_status[i];
2793                 }
2794         }
2795
2796         return NULL;
2797 }
2798
2799 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2800
2801 static void copy_surface_update_to_plane(
2802                 struct dc_plane_state *surface,
2803                 struct dc_surface_update *srf_update)
2804 {
2805         if (srf_update->flip_addr) {
2806                 surface->address = srf_update->flip_addr->address;
2807                 surface->flip_immediate =
2808                         srf_update->flip_addr->flip_immediate;
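                /* surface->time keeps a small ring buffer of flip-to-flip
                 * intervals; the index wraps at DC_PLANE_UPDATE_TIMES_MAX below.
                 */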
2809                 surface->time.time_elapsed_in_us[surface->time.index] =
2810                         srf_update->flip_addr->flip_timestamp_in_us -
2811                                 surface->time.prev_update_time_in_us;
2812                 surface->time.prev_update_time_in_us =
2813                         srf_update->flip_addr->flip_timestamp_in_us;
2814                 surface->time.index++;
2815                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2816                         surface->time.index = 0;
2817
2818                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2819         }
2820
2821         if (srf_update->scaling_info) {
2822                 surface->scaling_quality =
2823                                 srf_update->scaling_info->scaling_quality;
2824                 surface->dst_rect =
2825                                 srf_update->scaling_info->dst_rect;
2826                 surface->src_rect =
2827                                 srf_update->scaling_info->src_rect;
2828                 surface->clip_rect =
2829                                 srf_update->scaling_info->clip_rect;
2830         }
2831
2832         if (srf_update->plane_info) {
2833                 surface->color_space =
2834                                 srf_update->plane_info->color_space;
2835                 surface->format =
2836                                 srf_update->plane_info->format;
2837                 surface->plane_size =
2838                                 srf_update->plane_info->plane_size;
2839                 surface->rotation =
2840                                 srf_update->plane_info->rotation;
2841                 surface->horizontal_mirror =
2842                                 srf_update->plane_info->horizontal_mirror;
2843                 surface->stereo_format =
2844                                 srf_update->plane_info->stereo_format;
2845                 surface->tiling_info =
2846                                 srf_update->plane_info->tiling_info;
2847                 surface->visible =
2848                                 srf_update->plane_info->visible;
2849                 surface->per_pixel_alpha =
2850                                 srf_update->plane_info->per_pixel_alpha;
2851                 surface->global_alpha =
2852                                 srf_update->plane_info->global_alpha;
2853                 surface->global_alpha_value =
2854                                 srf_update->plane_info->global_alpha_value;
2855                 surface->dcc =
2856                                 srf_update->plane_info->dcc;
2857                 surface->layer_index =
2858                                 srf_update->plane_info->layer_index;
2859         }
2860
2861         if (srf_update->gamma) {
2862                 memcpy(&surface->gamma_correction.entries,
2863                         &srf_update->gamma->entries,
2864                         sizeof(struct dc_gamma_entries));
2865                 surface->gamma_correction.is_identity =
2866                         srf_update->gamma->is_identity;
2867                 surface->gamma_correction.num_entries =
2868                         srf_update->gamma->num_entries;
2869                 surface->gamma_correction.type =
2870                         srf_update->gamma->type;
2871         }
2872
2873         if (srf_update->in_transfer_func) {
2874                 surface->in_transfer_func.sdr_ref_white_level =
2875                         srf_update->in_transfer_func->sdr_ref_white_level;
2876                 surface->in_transfer_func.tf =
2877                         srf_update->in_transfer_func->tf;
2878                 surface->in_transfer_func.type =
2879                         srf_update->in_transfer_func->type;
2880                 memcpy(&surface->in_transfer_func.tf_pts,
2881                         &srf_update->in_transfer_func->tf_pts,
2882                         sizeof(struct dc_transfer_func_distributed_points));
2883         }
2884
2885         if (srf_update->func_shaper)
2886                 memcpy(&surface->in_shaper_func, srf_update->func_shaper,
2887                 sizeof(surface->in_shaper_func));
2888
2889         if (srf_update->lut3d_func)
2890                 memcpy(&surface->lut3d_func, srf_update->lut3d_func,
2891                 sizeof(surface->lut3d_func));
2892
2893         if (srf_update->hdr_mult.value)
2894                 surface->hdr_mult =
2895                                 srf_update->hdr_mult;
2896
2897         if (srf_update->blend_tf)
2898                 memcpy(&surface->blend_tf, srf_update->blend_tf,
2899                 sizeof(surface->blend_tf));
2900
2901         if (srf_update->input_csc_color_matrix)
2902                 surface->input_csc_color_matrix =
2903                         *srf_update->input_csc_color_matrix;
2904
2905         if (srf_update->coeff_reduction_factor)
2906                 surface->coeff_reduction_factor =
2907                         *srf_update->coeff_reduction_factor;
2908
2909         if (srf_update->gamut_remap_matrix)
2910                 surface->gamut_remap_matrix =
2911                         *srf_update->gamut_remap_matrix;
2912 }
2913
2914 static void copy_stream_update_to_stream(struct dc *dc,
2915                                          struct dc_state *context,
2916                                          struct dc_stream_state *stream,
2917                                          struct dc_stream_update *update)
2918 {
2919         struct dc_context *dc_ctx = dc->ctx;
2920
2921         if (update == NULL || stream == NULL)
2922                 return;
2923
2924         if (update->src.height && update->src.width)
2925                 stream->src = update->src;
2926
2927         if (update->dst.height && update->dst.width)
2928                 stream->dst = update->dst;
2929
2930         if (update->out_transfer_func) {
2931                 stream->out_transfer_func.sdr_ref_white_level =
2932                         update->out_transfer_func->sdr_ref_white_level;
2933                 stream->out_transfer_func.tf = update->out_transfer_func->tf;
2934                 stream->out_transfer_func.type =
2935                         update->out_transfer_func->type;
2936                 memcpy(&stream->out_transfer_func.tf_pts,
2937                        &update->out_transfer_func->tf_pts,
2938                        sizeof(struct dc_transfer_func_distributed_points));
2939         }
2940
2941         if (update->hdr_static_metadata)
2942                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2943
2944         if (update->abm_level)
2945                 stream->abm_level = *update->abm_level;
2946
2947         if (update->periodic_interrupt)
2948                 stream->periodic_interrupt = *update->periodic_interrupt;
2949
2950         if (update->gamut_remap)
2951                 stream->gamut_remap_matrix = *update->gamut_remap;
2952
2953         /* Note: updating this after mode set is currently not a use case;
2954          * however, if it arises, the OCSC would need to be reprogrammed at
2955          * a minimum.
2956          */
2957         if (update->output_color_space)
2958                 stream->output_color_space = *update->output_color_space;
2959
2960         if (update->output_csc_transform)
2961                 stream->csc_color_matrix = *update->output_csc_transform;
2962
2963         if (update->vrr_infopacket)
2964                 stream->vrr_infopacket = *update->vrr_infopacket;
2965
2966         if (update->allow_freesync)
2967                 stream->allow_freesync = *update->allow_freesync;
2968
2969         if (update->vrr_active_variable)
2970                 stream->vrr_active_variable = *update->vrr_active_variable;
2971
2972         if (update->vrr_active_fixed)
2973                 stream->vrr_active_fixed = *update->vrr_active_fixed;
2974
2975         if (update->crtc_timing_adjust)
2976                 stream->adjust = *update->crtc_timing_adjust;
2977
2978         if (update->dpms_off)
2979                 stream->dpms_off = *update->dpms_off;
2980
2981         if (update->hfvsif_infopacket)
2982                 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2983
2984         if (update->vtem_infopacket)
2985                 stream->vtem_infopacket = *update->vtem_infopacket;
2986
2987         if (update->vsc_infopacket)
2988                 stream->vsc_infopacket = *update->vsc_infopacket;
2989
2990         if (update->vsp_infopacket)
2991                 stream->vsp_infopacket = *update->vsp_infopacket;
2992
2993         if (update->adaptive_sync_infopacket)
2994                 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2995
2996         if (update->dither_option)
2997                 stream->dither_option = *update->dither_option;
2998
2999         if (update->pending_test_pattern)
3000                 stream->test_pattern = *update->pending_test_pattern;
3001         /* update current stream with writeback info */
3002         if (update->wb_update) {
3003                 int i;
3004
3005                 stream->num_wb_info = update->wb_update->num_wb_info;
3006                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
3007                 for (i = 0; i < stream->num_wb_info; i++)
3008                         stream->writeback_info[i] =
3009                                 update->wb_update->writeback_info[i];
3010         }
3011         if (update->dsc_config) {
3012                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
3013                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
3014                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
3015                                        update->dsc_config->num_slices_v != 0);
3016
3017                 /* Use temporary context for validating new DSC config */
3018                 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
3019
3020                 if (dsc_validate_context) {
3021                         stream->timing.dsc_cfg = *update->dsc_config;
3022                         stream->timing.flags.DSC = enable_dsc;
3023                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
3024                                 stream->timing.dsc_cfg = old_dsc_cfg;
3025                                 stream->timing.flags.DSC = old_dsc_enabled;
3026                                 update->dsc_config = NULL;
3027                         }
3028
3029                         dc_state_release(dsc_validate_context);
3030                 } else {
3031                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
3032                         update->dsc_config = NULL;
3033                 }
3034         }
3035 }
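
/*
 * Illustrative sketch of the validate-then-roll-back pattern used for the
 * DSC change above. apply_change()/undo_change() are placeholders; the other
 * names are the real ones from this function:
 *
 *	struct dc_state *tmp = dc_state_create_copy(dc->current_state);
 *
 *	if (tmp) {
 *		apply_change(stream);               // mutate SW state only
 *		if (!dc->res_pool->funcs->validate_bandwidth(dc, tmp, true))
 *			undo_change(stream);        // HW state never touched
 *		dc_state_release(tmp);
 *	}
 */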
3036
3037 static void backup_planes_and_stream_state(
3038                 struct dc_scratch_space *scratch,
3039                 struct dc_stream_state *stream)
3040 {
3041         int i;
3042         struct dc_stream_status *status = dc_stream_get_status(stream);
3043
3044         if (!status)
3045                 return;
3046
3047         for (i = 0; i < status->plane_count; i++) {
3048                 scratch->plane_states[i] = *status->plane_states[i];
3049         }
3050         scratch->stream_state = *stream;
3051 }
3052
3053 static void restore_planes_and_stream_state(
3054                 struct dc_scratch_space *scratch,
3055                 struct dc_stream_state *stream)
3056 {
3057         int i;
3058         struct dc_stream_status *status = dc_stream_get_status(stream);
3059
3060         if (!status)
3061                 return;
3062
3063         for (i = 0; i < status->plane_count; i++) {
3064                 *status->plane_states[i] = scratch->plane_states[i];
3065         }
3066         *stream = scratch->stream_state;
3067 }
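
/*
 * Note: the backup/restore pair above copies stream and plane state by value
 * into dc->scratch, so a full update that later fails can be rolled back
 * without allocating memory in the update path.
 */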
3068
3069 /**
3070  * update_seamless_boot_flags() - Helper function for updating seamless boot flags
3071  *
3072  * @dc: Current DC state
3073  * @context: New DC state to be programmed
3074  * @surface_count: Number of surfaces that have an update
3075  * @stream: Corresponding stream to be updated in the current flip
3076  *
3077  * Updating seamless boot flags does not need to be part of the commit sequence. This
3078  * helper function will update the seamless boot flags on each flip (if required)
3079  * outside of the HW commit sequence (fast or slow).
3080  *
3081  * Return: void
3082  */
3083 static void update_seamless_boot_flags(struct dc *dc,
3084                 struct dc_state *context,
3085                 int surface_count,
3086                 struct dc_stream_state *stream)
3087 {
3088         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3089                 /* The seamless boot optimization flag keeps clocks and watermarks
3090                  * high until the first flip; after that, optimization is required
3091                  * to lower bandwidth. Note that UEFI is expected to light up only
3092                  * a single display on POST, so we only expect one stream with the
3093                  * seamless boot flag set.
3094                  */
3095                 if (stream->apply_seamless_boot_optimization) {
3096                         stream->apply_seamless_boot_optimization = false;
3097
3098                         if (get_seamless_boot_stream_count(context) == 0)
3099                                 dc->optimized_required = true;
3100                 }
3101         }
3102 }
3103
3104 /**
3105  * update_planes_and_stream_state() - The function takes planes and stream
3106  * updates as inputs and determines the appropriate update type. If update type
3107  * is FULL, the function allocates a new context, populates and validates it.
3108  * Otherwise, it updates current dc context. The function will return both
3109  * new_context and new_update_type back to the caller. The function also backs
3110  * up both current and new contexts into corresponding dc state scratch memory.
3111  * TODO: The function does too many things, and even conditionally allocates dc
3112  * context memory implicitly. We should consider breaking it down.
3113  *
3114  * @dc: Current DC state
3115  * @srf_updates: an array of surface updates
3116  * @surface_count: surface update count
3117  * @stream: Corresponding stream to be updated
3118  * @stream_update: stream update
3119  * @new_update_type: [out] determined update type by the function
3120  * @new_context: [out] new context allocated and validated if update type is
3121  * FULL, reference to current context if update type is less than FULL.
3122  *
3123  * Return: true if a valid update is populated into new_context, false
3124  * otherwise.
3125  */
3126 static bool update_planes_and_stream_state(struct dc *dc,
3127                 struct dc_surface_update *srf_updates, int surface_count,
3128                 struct dc_stream_state *stream,
3129                 struct dc_stream_update *stream_update,
3130                 enum surface_update_type *new_update_type,
3131                 struct dc_state **new_context)
3132 {
3133         struct dc_state *context;
3134         int i, j;
3135         enum surface_update_type update_type;
3136         const struct dc_stream_status *stream_status;
3137         struct dc_context *dc_ctx = dc->ctx;
3138
3139         stream_status = dc_stream_get_status(stream);
3140
3141         if (!stream_status) {
3142                 if (surface_count) /* Only an error condition if surface_count is non-zero */
3143                         ASSERT(false);
3144
3145                 return false; /* Cannot commit surface to stream that is not committed */
3146         }
3147
3148         context = dc->current_state;
3149         update_type = dc_check_update_surfaces_for_stream(
3150                         dc, srf_updates, surface_count, stream_update, stream_status);
3151         if (update_type == UPDATE_TYPE_FULL)
3152                 backup_planes_and_stream_state(&dc->scratch.current_state, stream);
3153
3154         /* update current stream with the new updates */
3155         copy_stream_update_to_stream(dc, context, stream, stream_update);
3156
3157         /* do not perform surface update if surface has invalid dimensions
3158          * (all zero) and no scaling_info is provided
3159          */
3160         if (surface_count > 0) {
3161                 for (i = 0; i < surface_count; i++) {
3162                         if ((srf_updates[i].surface->src_rect.width == 0 ||
3163                                  srf_updates[i].surface->src_rect.height == 0 ||
3164                                  srf_updates[i].surface->dst_rect.width == 0 ||
3165                                  srf_updates[i].surface->dst_rect.height == 0) &&
3166                                 (!srf_updates[i].scaling_info ||
3167                                   srf_updates[i].scaling_info->src_rect.width == 0 ||
3168                                   srf_updates[i].scaling_info->src_rect.height == 0 ||
3169                                   srf_updates[i].scaling_info->dst_rect.width == 0 ||
3170                                   srf_updates[i].scaling_info->dst_rect.height == 0)) {
3171                                 DC_ERROR("Invalid src/dst rects in surface update!\n");
3172                                 return false;
3173                         }
3174                 }
3175         }
3176
3177         if (update_type >= update_surface_trace_level)
3178                 update_surface_trace(dc, srf_updates, surface_count);
3179
3180         for (i = 0; i < surface_count; i++)
3181                 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3182
3183         if (update_type >= UPDATE_TYPE_FULL) {
3184                 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3185
3186                 for (i = 0; i < surface_count; i++)
3187                         new_planes[i] = srf_updates[i].surface;
3188
3189                 /* initialize scratch memory for building context */
3190                 context = dc_state_create_copy(dc->current_state);
3191                 if (context == NULL) {
3192                         DC_ERROR("Failed to allocate new validate context!\n");
3193                         return false;
3194                 }
3195
3196                 /* For each full update, remove all existing phantom pipes first.
3197                  * This ensures that we have enough pipes for newly added MPO planes.
3198                  */
3199                 dc_state_remove_phantom_streams_and_planes(dc, context);
3200                 dc_state_release_phantom_streams_and_planes(dc, context);
3201
3202                 /* remove old surfaces from context */
3203                 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3204
3205                         BREAK_TO_DEBUGGER();
3206                         goto fail;
3207                 }
3208
3209                 /* add surface to context */
3210                 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3211
3212                         BREAK_TO_DEBUGGER();
3213                         goto fail;
3214                 }
3215         }
3216
3217         /* save update parameters into surface */
3218         for (i = 0; i < surface_count; i++) {
3219                 struct dc_plane_state *surface = srf_updates[i].surface;
3220
3221                 if (update_type != UPDATE_TYPE_MED)
3222                         continue;
3223                 if (surface->update_flags.bits.clip_size_change ||
3224                                 surface->update_flags.bits.position_change) {
3225                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3226                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3227
3228                                 if (pipe_ctx->plane_state != surface)
3229                                         continue;
3230
3231                                 resource_build_scaling_params(pipe_ctx);
3232                         }
3233                 }
3234         }
3235
3236         if (update_type == UPDATE_TYPE_FULL) {
3237                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3238                         BREAK_TO_DEBUGGER();
3239                         goto fail;
3240                 }
3241         }
3242         update_seamless_boot_flags(dc, context, surface_count, stream);
3243
3244         *new_context = context;
3245         *new_update_type = update_type;
3246         if (update_type == UPDATE_TYPE_FULL)
3247                 backup_planes_and_stream_state(&dc->scratch.new_state, stream);
3248
3249         return true;
3250
3251 fail:
3252         dc_state_release(context);
3253
3254         return false;
3255
3256 }
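
/*
 * Usage sketch (hypothetical caller; not part of this file):
 *
 *	struct dc_state *new_context;
 *	enum surface_update_type type;
 *
 *	if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
 *			stream, stream_update, &type, &new_context))
 *		return false;
 *
 * On success with type == UPDATE_TYPE_FULL, new_context is a freshly
 * validated copy that the caller must eventually release with
 * dc_state_release(); otherwise it aliases dc->current_state.
 */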
3257
3258 static void commit_planes_do_stream_update(struct dc *dc,
3259                 struct dc_stream_state *stream,
3260                 struct dc_stream_update *stream_update,
3261                 enum surface_update_type update_type,
3262                 struct dc_state *context)
3263 {
3264         int j;
3265
3266         // Stream updates
3267         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3268                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3269
3270                 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3271
3272                         if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3273                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3274
3275                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3276                                         stream_update->vrr_infopacket ||
3277                                         stream_update->vsc_infopacket ||
3278                                         stream_update->vsp_infopacket ||
3279                                         stream_update->hfvsif_infopacket ||
3280                                         stream_update->adaptive_sync_infopacket ||
3281                                         stream_update->vtem_infopacket) {
3282                                 resource_build_info_frame(pipe_ctx);
3283                                 dc->hwss.update_info_frame(pipe_ctx);
3284
3285                                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3286                                         dc->link_srv->dp_trace_source_sequence(
3287                                                         pipe_ctx->stream->link,
3288                                                         DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3289                         }
3290
3291                         if (stream_update->hdr_static_metadata &&
3292                                         stream->use_dynamic_meta &&
3293                                         dc->hwss.set_dmdata_attributes &&
3294                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
3295                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
3296
3297                         if (stream_update->gamut_remap)
3298                                 dc_stream_set_gamut_remap(dc, stream);
3299
3300                         if (stream_update->output_csc_transform)
3301                                 dc_stream_program_csc_matrix(dc, stream);
3302
3303                         if (stream_update->dither_option) {
3304                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3305                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3306                                                                         &pipe_ctx->stream->bit_depth_params);
3307                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3308                                                 &stream->bit_depth_params,
3309                                                 &stream->clamping);
3310                                 while (odm_pipe) {
3311                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3312                                                         &stream->bit_depth_params,
3313                                                         &stream->clamping);
3314                                         odm_pipe = odm_pipe->next_odm_pipe;
3315                                 }
3316                         }
3317
3318
3319                         /* Remaining updates require a full front-end (fe) update */
3320                         if (update_type == UPDATE_TYPE_FAST)
3321                                 continue;
3322
3323                         if (stream_update->dsc_config)
3324                                 dc->link_srv->update_dsc_config(pipe_ctx);
3325
3326                         if (stream_update->mst_bw_update) {
3327                                 if (stream_update->mst_bw_update->is_increase)
3328                                         dc->link_srv->increase_mst_payload(pipe_ctx,
3329                                                         stream_update->mst_bw_update->mst_stream_bw);
3330                                 else
3331                                         dc->link_srv->reduce_mst_payload(pipe_ctx,
3332                                                         stream_update->mst_bw_update->mst_stream_bw);
3333                         }
3334
3335                         if (stream_update->pending_test_pattern) {
3336                                 /*
3337                                  * Test pattern params depend on ODM topology
3338                                  * changes that we may be applying to the front
3339                                  * end. Since front-end changes are not yet
3340                                  * applied at this stage, we can only apply the
3341                                  * test pattern in HW based on the current state,
3342                                  * and populate the final test pattern params in
3343                                  * the new state. If the current and new test
3344                                  * pattern params differ as a result of different
3345                                  * ODM topologies being used, this is detected
3346                                  * and handled during the front-end programming
3347                                  * update.
3348                                  */
3349                                 dc->link_srv->dp_set_test_pattern(stream->link,
3350                                         stream->test_pattern.type,
3351                                         stream->test_pattern.color_space,
3352                                         stream->test_pattern.p_link_settings,
3353                                         stream->test_pattern.p_custom_pattern,
3354                                         stream->test_pattern.cust_pattern_size);
3355                                 resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
3356                         }
3357
3358                         if (stream_update->dpms_off) {
3359                                 if (*stream_update->dpms_off) {
3360                                         dc->link_srv->set_dpms_off(pipe_ctx);
3361                                         /* for dpms, keep acquired resources*/
3362                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3363                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3364
3365                                         dc->optimized_required = true;
3366
3367                                 } else {
3368                                         if (get_seamless_boot_stream_count(context) == 0)
3369                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3370                                         dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3371                                 }
3372                         } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3373                                         && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3374                                 /*
3375                                  * Workaround for firmware issue in some receivers where they don't pick up
3376                                  * correct output color space unless DP link is disabled/re-enabled
3377                                  */
3378                                 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3379                         }
3380
3381                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3382                                 bool should_program_abm = true;
3383
3384                                 // if otg funcs defined check if blanked before programming
3385                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3386                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3387                                                 should_program_abm = false;
3388
3389                                 if (should_program_abm) {
3390                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3391                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3392                                         } else {
3393                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3394                                                         pipe_ctx->stream_res.abm, stream->abm_level);
3395                                         }
3396                                 }
3397                         }
3398                 }
3399         }
3400 }
3401
3402 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3403 {
3404         if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3405                         || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3406                         && stream->ctx->dce_version >= DCN_VERSION_3_1)
3407                 return true;
3408
3409         if (stream->link->replay_settings.config.replay_supported)
3410                 return true;
3411
3412         if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
3413                 return true;
3414
3415         return false;
3416 }
3417
3418 void dc_dmub_update_dirty_rect(struct dc *dc,
3419                                int surface_count,
3420                                struct dc_stream_state *stream,
3421                                struct dc_surface_update *srf_updates,
3422                                struct dc_state *context)
3423 {
3424         union dmub_rb_cmd cmd;
3425         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3426         unsigned int i, j;
3427         unsigned int panel_inst = 0;
3428
3429         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3430                 return;
3431
3432         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3433                 return;
3434
3435         memset(&cmd, 0x0, sizeof(cmd));
3436         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3437         cmd.update_dirty_rect.header.sub_type = 0;
3438         cmd.update_dirty_rect.header.payload_bytes =
3439                 sizeof(cmd.update_dirty_rect) -
3440                 sizeof(cmd.update_dirty_rect.header);
3441         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3442         for (i = 0; i < surface_count; i++) {
3443                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3444                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3445
3446                 if (!srf_updates[i].surface || !flip_addr)
3447                         continue;
3448                 /* Do not send in immediate flip mode */
3449                 if (srf_updates[i].surface->flip_immediate)
3450                         continue;
3451
3452                 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3453                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3454                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3455                                 sizeof(flip_addr->dirty_rects));
3456                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3457                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3458
3459                         if (pipe_ctx->stream != stream)
3460                                 continue;
3461                         if (pipe_ctx->plane_state != plane_state)
3462                                 continue;
3463
3464                         update_dirty_rect->panel_inst = panel_inst;
3465                         update_dirty_rect->pipe_idx = j;
3466                         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3467                 }
3468         }
3469 }
3470
3471 static void build_dmub_update_dirty_rect(
3472                 struct dc *dc,
3473                 int surface_count,
3474                 struct dc_stream_state *stream,
3475                 struct dc_surface_update *srf_updates,
3476                 struct dc_state *context,
3477                 struct dc_dmub_cmd dc_dmub_cmd[],
3478                 unsigned int *dmub_cmd_count)
3479 {
3480         union dmub_rb_cmd cmd;
3481         struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3482         unsigned int i, j;
3483         unsigned int panel_inst = 0;
3484
3485         if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3486                 return;
3487
3488         if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3489                 return;
3490
3491         memset(&cmd, 0x0, sizeof(cmd));
3492         cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3493         cmd.update_dirty_rect.header.sub_type = 0;
3494         cmd.update_dirty_rect.header.payload_bytes =
3495                 sizeof(cmd.update_dirty_rect) -
3496                 sizeof(cmd.update_dirty_rect.header);
3497         update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3498         for (i = 0; i < surface_count; i++) {
3499                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3500                 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3501
3502                 if (!srf_updates[i].surface || !flip_addr)
3503                         continue;
3504                 /* Do not send in immediate flip mode */
3505                 if (srf_updates[i].surface->flip_immediate)
3506                         continue;
3507                 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3508                 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3509                 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3510                                 sizeof(flip_addr->dirty_rects));
3511                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3512                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3513
3514                         if (pipe_ctx->stream != stream)
3515                                 continue;
3516                         if (pipe_ctx->plane_state != plane_state)
3517                                 continue;
3518                         update_dirty_rect->panel_inst = panel_inst;
3519                         update_dirty_rect->pipe_idx = j;
3520                         dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3521                         dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3522                         (*dmub_cmd_count)++;
3523                 }
3524         }
3525 }
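
/*
 * Note: build_dmub_update_dirty_rect() mirrors dc_dmub_update_dirty_rect()
 * above, but instead of executing each command immediately it appends the
 * commands to the dc_dmub_cmd[] array so the caller can send the whole batch
 * later, while the OTG lock is held (see build_dmub_cmd_list() below).
 */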
3526
3527
3528 /**
3529  * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3530  *
3531  * @dc: Current DC state
3532  * @srf_updates: Array of surface updates
3533  * @surface_count: Number of surfaces that have an update
3534  * @stream: Corresponding stream to be updated in the current flip
3535  * @context: New DC state to be programmed
3536  *
3537  * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3538  * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3539  *
3540  * This function builds an array of DMCUB commands so that they can be sent to
3541  * DMCUB while the OTG lock is held.
3542  *
3543  * Return: void
3544  */
3545 static void build_dmub_cmd_list(struct dc *dc,
3546                 struct dc_surface_update *srf_updates,
3547                 int surface_count,
3548                 struct dc_stream_state *stream,
3549                 struct dc_state *context,
3550                 struct dc_dmub_cmd dc_dmub_cmd[],
3551                 unsigned int *dmub_cmd_count)
3552 {
3553         // Initialize cmd count to 0
3554         *dmub_cmd_count = 0;
3555         build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3556 }
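
/*
 * Illustrative extension point (hypothetical): additional command builders
 * would follow the same append-and-count pattern, e.g.
 *
 *	build_dmub_update_foo(dc, ..., dc_dmub_cmd, dmub_cmd_count);
 *
 * with each builder writing to dc_dmub_cmd[*dmub_cmd_count] and incrementing
 * the count, so every queued command is sent under the same OTG lock.
 */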
3557
3558 static void commit_planes_for_stream_fast(struct dc *dc,
3559                 struct dc_surface_update *srf_updates,
3560                 int surface_count,
3561                 struct dc_stream_state *stream,
3562                 struct dc_stream_update *stream_update,
3563                 enum surface_update_type update_type,
3564                 struct dc_state *context)
3565 {
3566         int i, j;
3567         struct pipe_ctx *top_pipe_to_program = NULL;
3568         struct dc_stream_status *stream_status = NULL;
3569
3570         dc_exit_ips_for_hw_access(dc);
3571
3572         dc_z10_restore(dc);
3573
3574         top_pipe_to_program = resource_get_otg_master_for_stream(
3575                         &context->res_ctx,
3576                         stream);
3577
3578         if (!top_pipe_to_program)
3579                 return;
3580
3581         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3582                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3583
3584                 if (pipe->stream && pipe->plane_state) {
3585                         set_p_state_switch_method(dc, context, pipe);
3586
3587                         if (dc->debug.visual_confirm)
3588                                 dc_update_visual_confirm_color(dc, context, pipe);
3589                 }
3590         }
3591
3592         for (i = 0; i < surface_count; i++) {
3593                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3594                 /* set logical flag for lock/unlock use */
3595                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3596                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3597
3598                         if (!pipe_ctx->plane_state)
3599                                 continue;
3600                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3601                                 continue;
3602                         pipe_ctx->plane_state->triplebuffer_flips = false;
3603                         if (update_type == UPDATE_TYPE_FAST &&
3604                             dc->hwss.program_triplebuffer &&
3605                             !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3606                                 /* triple buffer for VUpdate only */
3607                                 pipe_ctx->plane_state->triplebuffer_flips = true;
3608                         }
3609                 }
3610         }
3611
3612         stream_status = dc_state_get_stream_status(context, stream);
3613
3614         build_dmub_cmd_list(dc,
3615                         srf_updates,
3616                         surface_count,
3617                         stream,
3618                         context,
3619                         context->dc_dmub_cmd,
3620                         &(context->dmub_cmd_count));
3621         hwss_build_fast_sequence(dc,
3622                         context->dc_dmub_cmd,
3623                         context->dmub_cmd_count,
3624                         context->block_sequence,
3625                         &(context->block_sequence_steps),
3626                         top_pipe_to_program,
3627                         stream_status,
3628                         context);
3629         hwss_execute_sequence(dc,
3630                         context->block_sequence,
3631                         context->block_sequence_steps);
3632         /* Clear update flags so next flip doesn't have redundant programming
3633          * (if there's no stream update, the update flags are not cleared).
3634          * Surface updates are cleared unconditionally at the beginning of each flip,
3635          * so no need to clear here.
3636          */
3637         if (top_pipe_to_program->stream)
3638                 top_pipe_to_program->stream->update_flags.raw = 0;
3639 }
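
/*
 * Note: the fast path above first records the required HW programming steps
 * (build_dmub_cmd_list() plus hwss_build_fast_sequence()) and then replays
 * them with hwss_execute_sequence(), keeping the time spent programming
 * registers during a flip short and predictable.
 */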
3640
3641 static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
3642 {
3643 /*
3644  * This function calls HWSS to wait for any potentially double buffered
3645  * operations to complete. It should be invoked as a preamble prior to
3646  * full update programming, before asserting any HW locks.
3647  */
3648         int pipe_idx;
3649         int opp_inst;
3650         int opp_count = dc->res_pool->res_cap->num_opp;
3651         struct hubp *hubp;
3652         int mpcc_inst;
3653         const struct pipe_ctx *pipe_ctx;
3654
3655         for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3656                 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3657
3658                 if (!pipe_ctx->stream)
3659                         continue;
3660
3661                 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3662                         pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3663
3664                 hubp = pipe_ctx->plane_res.hubp;
3665                 if (!hubp)
3666                         continue;
3667
3668                 mpcc_inst = hubp->inst;
3669                 // MPCC inst is equal to pipe index in practice
3670                 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3671                         if ((dc->res_pool->opps[opp_inst] != NULL) &&
3672                                 (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
3673                                 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3674                                 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3675                                 break;
3676                         }
3677                 }
3678         }
3679         wait_for_odm_update_pending_complete(dc, dc_context);
3680 }
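
/*
 * Note: the wait above covers the double-buffered state visible in this
 * function: pending DRR double-buffer updates on each timing generator,
 * pending MPCC disconnects tracked per OPP, and any outstanding ODM update
 * (wait_for_odm_update_pending_complete()).
 */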
3681
3682 static void commit_planes_for_stream(struct dc *dc,
3683                 struct dc_surface_update *srf_updates,
3684                 int surface_count,
3685                 struct dc_stream_state *stream,
3686                 struct dc_stream_update *stream_update,
3687                 enum surface_update_type update_type,
3688                 struct dc_state *context)
3689 {
3690         int i, j;
3691         struct pipe_ctx *top_pipe_to_program = NULL;
3692         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3693         bool subvp_prev_use = false;
3694         bool subvp_curr_use = false;
3695         uint8_t current_stream_mask = 0;
3696
3697         // Once we apply the new SubVP context to hardware it won't be in
3698         // dc->current_state anymore, so cache whether SubVP was in use
3699         // before applying the new context
3700         subvp_prev_use = false;
3701         dc_exit_ips_for_hw_access(dc);
3702
3703         dc_z10_restore(dc);
3704         if (update_type == UPDATE_TYPE_FULL)
3705                 wait_for_outstanding_hw_updates(dc, context);
3706
3707         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3708                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3709
3710                 if (pipe->stream && pipe->plane_state) {
3711                         set_p_state_switch_method(dc, context, pipe);
3712
3713                         if (dc->debug.visual_confirm)
3714                                 dc_update_visual_confirm_color(dc, context, pipe);
3715                 }
3716         }
3717
3718         if (update_type == UPDATE_TYPE_FULL) {
3719                 dc_allow_idle_optimizations(dc, false);
3720
3721                 if (get_seamless_boot_stream_count(context) == 0)
3722                         dc->hwss.prepare_bandwidth(dc, context);
3723
3724                 if (dc->hwss.update_dsc_pg)
3725                         dc->hwss.update_dsc_pg(dc, context, false);
3726
3727                 context_clock_trace(dc, context);
3728         }
3729
3730         top_pipe_to_program = resource_get_otg_master_for_stream(
3731                                 &context->res_ctx,
3732                                 stream);
3733         ASSERT(top_pipe_to_program != NULL);
3734         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3735                 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3736
3737                 // Check old context for SubVP
3738                 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
3739                 if (subvp_prev_use)
3740                         break;
3741         }
3742
3743         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3744                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3745
3746                 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3747                         subvp_curr_use = true;
3748                         break;
3749                 }
3750         }
3751
3752         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3753                 struct pipe_ctx *mpcc_pipe;
3754                 struct pipe_ctx *odm_pipe;
3755
3756                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3757                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3758                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3759         }
3760
3761         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3762                 if (top_pipe_to_program &&
3763                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3764                         if (should_use_dmub_lock(stream->link)) {
3765                                 union dmub_hw_lock_flags hw_locks = { 0 };
3766                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3767
3768                                 hw_locks.bits.lock_dig = 1;
3769                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3770
3771                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3772                                                         true,
3773                                                         &hw_locks,
3774                                                         &inst_flags);
3775                         } else
3776                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3777                                                 top_pipe_to_program->stream_res.tg);
3778                 }
3779
3780         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3781                 if (dc->hwss.subvp_pipe_control_lock)
3782                                 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3783                 dc->hwss.interdependent_update_lock(dc, context, true);
3784
3785         } else {
3786                 if (dc->hwss.subvp_pipe_control_lock)
3787                         dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3788                 /* Lock the top pipe while updating plane addrs, since freesync requires
3789                  * plane addr update event triggers to be synchronized.
3790                  * top_pipe_to_program is expected to never be NULL.
3791                  */
3792                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3793         }
3794
3795         dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3796
3797         // Stream updates
3798         if (stream_update)
3799                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3800
3801         if (surface_count == 0) {
3802                 /*
3803                  * In the case of turning off the screen, there is no need to program the
3804                  * front end a second time; just return after programming blank.
3805                  */
3806                 if (dc->hwss.apply_ctx_for_surface)
3807                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3808                 if (dc->hwss.program_front_end_for_ctx)
3809                         dc->hwss.program_front_end_for_ctx(dc, context);
3810
3811                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3812                         dc->hwss.interdependent_update_lock(dc, context, false);
3813                 } else {
3814                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3815                 }
3816                 dc->hwss.post_unlock_program_front_end(dc, context);
3817
3818                 if (update_type != UPDATE_TYPE_FAST)
3819                         if (dc->hwss.commit_subvp_config)
3820                                 dc->hwss.commit_subvp_config(dc, context);
3821
3822                 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3823                  * move the SubVP lock to after the phantom pipes have been set up
3824                  */
3825                 if (dc->hwss.subvp_pipe_control_lock)
3826                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3827                                                          NULL, subvp_prev_use);
3828                 return;
3829         }
3830
3831         if (update_type != UPDATE_TYPE_FAST) {
3832                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3833                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3834
3835                         if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3836                                 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3837                                 pipe_ctx->stream && pipe_ctx->plane_state) {
3838                                 /* Only update visual confirm for SubVP and MCLK switching here.
3839                                  * The bar appears on all pipes, so we need to update it on all
3840                                  * displays so the information doesn't become stale.
3841                                  */
3842                                 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3843                                                 pipe_ctx->plane_res.hubp->inst);
3844                         }
3845                 }
3846         }
3847
3848         for (i = 0; i < surface_count; i++) {
3849                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3850                 /* set logical flag for lock/unlock use */
3851                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3852                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3853                         if (!pipe_ctx->plane_state)
3854                                 continue;
3855                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3856                                 continue;
3857                         pipe_ctx->plane_state->triplebuffer_flips = false;
3858                         if (update_type == UPDATE_TYPE_FAST &&
3859                                 dc->hwss.program_triplebuffer != NULL &&
3860                                 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3861                                         /* triple buffer for VUpdate only */
3862                                         pipe_ctx->plane_state->triplebuffer_flips = true;
3863                         }
3864                 }
3865                 if (update_type == UPDATE_TYPE_FULL) {
3866                         /* force vsync flip when reconfiguring pipes to prevent underflow */
3867                         plane_state->flip_immediate = false;
3868                 }
3869         }
3870
3871         // Update Type FULL, Surface updates
3872         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3873                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3874
3875                 if (!pipe_ctx->top_pipe &&
3876                         !pipe_ctx->prev_odm_pipe &&
3877                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3878                         struct dc_stream_status *stream_status = NULL;
3879
3880                         if (!pipe_ctx->plane_state)
3881                                 continue;
3882
3883                         /* Full front end update */
3884                         if (update_type == UPDATE_TYPE_FAST)
3885                                 continue;
3886
3887                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3888
3889                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3890                                 /*turn off triple buffer for full update*/
3891                                 dc->hwss.program_triplebuffer(
3892                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3893                         }
3894                         stream_status =
3895                                 stream_get_status(context, pipe_ctx->stream);
3896
3897                         if (dc->hwss.apply_ctx_for_surface)
3898                                 dc->hwss.apply_ctx_for_surface(
3899                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
3900                 }
3901         }
3902         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3903                 dc->hwss.program_front_end_for_ctx(dc, context);
3904                 if (dc->debug.validate_dml_output) {
3905                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3906                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3907                                 if (cur_pipe->stream == NULL)
3908                                         continue;
3909
3910                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3911                                                 cur_pipe->plane_res.hubp, dc->ctx,
3912                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3913                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3914                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3915                         }
3916                 }
3917         }
3918
3919         // Update Type FAST, Surface updates
3920         if (update_type == UPDATE_TYPE_FAST) {
3921                 if (dc->hwss.set_flip_control_gsl)
3922                         for (i = 0; i < surface_count; i++) {
3923                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3924
3925                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3926                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3927
3928                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3929                                                 continue;
3930
3931                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3932                                                 continue;
3933
3934                                         // GSL has to be used for flip immediate
3935                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3936                                                         pipe_ctx->plane_state->flip_immediate);
3937                                 }
3938                         }
3939
3940                 /* Perform requested Updates */
3941                 for (i = 0; i < surface_count; i++) {
3942                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3943
3944                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3945                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3946
3947                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3948                                         continue;
3949
3950                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3951                                         continue;
3952
3953                                 /* program triple buffer after lock based on flip type */
3954                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3955                                         /* only enable triplebuffer for fast_update */
3956                                         dc->hwss.program_triplebuffer(
3957                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3958                                 }
3959                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3960                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3961                         }
3962                 }
3963         }
3964
3965         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3966                 dc->hwss.interdependent_update_lock(dc, context, false);
3967         } else {
3968                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3969         }
3970
3971         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3972                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3973                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3974                                 top_pipe_to_program->stream_res.tg,
3975                                 CRTC_STATE_VACTIVE);
3976                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3977                                 top_pipe_to_program->stream_res.tg,
3978                                 CRTC_STATE_VBLANK);
3979                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3980                                 top_pipe_to_program->stream_res.tg,
3981                                 CRTC_STATE_VACTIVE);
3982
3983                         if (should_use_dmub_lock(stream->link)) {
3984                                 union dmub_hw_lock_flags hw_locks = { 0 };
3985                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3986
3987                                 hw_locks.bits.lock_dig = 1;
3988                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3989
3990                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3991                                                         false,
3992                                                         &hw_locks,
3993                                                         &inst_flags);
3994                         } else
3995                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3996                                         top_pipe_to_program->stream_res.tg);
3997                 }
3998
3999         if (subvp_curr_use) {
4000                 /* If enabling subvp or transitioning from subvp->subvp, enable the
4001                  * phantom streams before we program front end for the phantom pipes.
4002                  */
4003                 if (update_type != UPDATE_TYPE_FAST) {
4004                         if (dc->hwss.enable_phantom_streams)
4005                                 dc->hwss.enable_phantom_streams(dc, context);
4006                 }
4007         }
4008
4009         if (update_type != UPDATE_TYPE_FAST)
4010                 dc->hwss.post_unlock_program_front_end(dc, context);
4011
4012         if (subvp_prev_use && !subvp_curr_use) {
4013                 /* If disabling subvp, disable phantom streams after front end
4014                  * programming has completed (we turn on phantom OTG in order
4015                  * to complete the plane disable for phantom pipes).
4016                  */
4017
4018                 if (dc->hwss.disable_phantom_streams)
4019                         dc->hwss.disable_phantom_streams(dc, context);
4020         }
4021
4022         if (update_type != UPDATE_TYPE_FAST)
4023                 if (dc->hwss.commit_subvp_config)
4024                         dc->hwss.commit_subvp_config(dc, context);
4025         /* Since phantom pipe programming is moved to post_unlock_program_front_end,
4026          * move the SubVP lock to after the phantom pipes have been setup
4027          */
4028         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4029                 if (dc->hwss.subvp_pipe_control_lock)
4030                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
4031         } else {
4032                 if (dc->hwss.subvp_pipe_control_lock)
4033                         dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
4034         }
4035
4036         // Fire manual trigger only when bottom plane is flipped
4037         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4038                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4039
4040                 if (!pipe_ctx->plane_state)
4041                         continue;
4042
4043                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
4044                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
4045                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
4046                                 pipe_ctx->plane_state->skip_manual_trigger)
4047                         continue;
4048
4049                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
4050                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
4051         }
4052
4053         current_stream_mask = get_stream_mask(dc, context);
4054         if (current_stream_mask != context->stream_mask) {
4055                 context->stream_mask = current_stream_mask;
4056                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
4057         }
4058 }
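
/*
 * Summary of the sequence above (descriptive only): commit_planes_for_stream()
 * brackets all front end programming in a lock/unlock pair:
 *
 *	lock pipes (interdependent update lock or top pipe lock, plus the
 *	SubVP lock when applicable)
 *		update DMUB dirty rects and stream-level state
 *		program the front end (full updates) or flip addresses and
 *		triple buffering (fast updates)
 *	unlock pipes
 *	post-unlock front end programming, phantom stream and SubVP config,
 *	manual trigger for flipped bottom planes
 *	notify DMUB if the active stream mask changed
 */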
4059
4060 /**
4061  * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
4062  *
4063  * @dc: Used to get the current state status
4064  * @stream: Target stream, whose attached planes we are adding or removing
4065  * @srf_updates: Array of surface updates
4066  * @surface_count: Number of surface updates
4067  * @is_plane_addition: [out] Filled out with true if it is a plane addition case
4068  *
4069  * DCN32x and newer support a feature named Dynamic ODM which can conflict with
4070  * MPO if used simultaneously in some specific configurations (e.g., 4k@144).
4071  * This function checks if the incoming context requires applying a transition
4072  * state with unnecessary pipe splitting and ODM disabled, in order to
4073  * circumvent this hardware limitation and prevent the edge case. If the OPP
4074  * associated with an MPCC might change due to plane additions, this function
4075  * returns true.
4076  *
4077  * Return:
4078  * Return true if OPP and MPCC might change, otherwise, return false.
4079  */
4080 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
4081                 struct dc_stream_state *stream,
4082                 struct dc_surface_update *srf_updates,
4083                 int surface_count,
4084                 bool *is_plane_addition)
4085 {
4086
4087         struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4088         bool force_minimal_pipe_splitting = false;
4089         bool subvp_active = false;
4090         uint32_t i;
4091
4092         *is_plane_addition = false;
4093
4094         if (cur_stream_status &&
4095                         dc->current_state->stream_count > 0 &&
4096                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4097                 /* determine if minimal transition is required due to MPC */
4098                 if (surface_count > 0) {
4099                         if (cur_stream_status->plane_count > surface_count) {
4100                                 force_minimal_pipe_splitting = true;
4101                         } else if (cur_stream_status->plane_count < surface_count) {
4102                                 force_minimal_pipe_splitting = true;
4103                                 *is_plane_addition = true;
4104                         }
4105                 }
4106         }
4107
4108         if (cur_stream_status &&
4109                         dc->current_state->stream_count == 1 &&
4110                         dc->debug.enable_single_display_2to1_odm_policy) {
4111                 /* determine if minimal transition is required due to dynamic ODM */
4112                 if (surface_count > 0) {
4113                         if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4114                                 force_minimal_pipe_splitting = true;
4115                         } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4116                                 force_minimal_pipe_splitting = true;
4117                                 *is_plane_addition = true;
4118                         }
4119                 }
4120         }
4121
4122         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4123                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4124
4125                 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4126                         subvp_active = true;
4127                         break;
4128                 }
4129         }
4130
4131         /* For SubVP, when adding or removing planes we need to add a minimal transition
4132          * (even when disabling all planes). Whenever disabling a phantom pipe, we
4133          * must use the minimal transition path to disable the pipe correctly.
4134          *
4135          * We want to use the minimal transition whenever SubVP is active, not only if
4136          * a plane is being added / removed from a SubVP stream (an MPO plane can be
4137          * added to a DRR pipe of a SubVP + DRR config, in which case we still want to
4138          * run through a min transition to disable SubVP).
4139          */
4140         if (cur_stream_status && subvp_active) {
4141                 /* determine if minimal transition is required due to SubVP */
4142                 if (cur_stream_status->plane_count > surface_count) {
4143                         force_minimal_pipe_splitting = true;
4144                 } else if (cur_stream_status->plane_count < surface_count) {
4145                         force_minimal_pipe_splitting = true;
4146                         *is_plane_addition = true;
4147                 }
4148         }
4149
4150         return force_minimal_pipe_splitting;
4151 }
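
/*
 * Illustrative sketch (not part of the driver): how a caller is expected to
 * pair the predicate above with a minimal transition commit. This mirrors the
 * real usage in update_planes_and_stream_v2() further below; the local
 * variable names are hypothetical.
 *
 *	bool is_plane_addition = false;
 *	bool needs_minimal = could_mpcc_tree_change_for_active_pipes(dc,
 *			stream, srf_updates, surface_count, &is_plane_addition);
 *
 *	if (needs_minimal && is_plane_addition &&
 *			!commit_minimal_transition_state(dc, dc->current_state))
 *		return false;
 */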
4152
4153 struct pipe_split_policy_backup {
4154         bool dynamic_odm_policy;
4155         bool subvp_policy;
4156         enum pipe_split_policy mpc_policy;
4157         char force_odm[MAX_PIPES];
4158 };
4159
4160 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
4161                 struct dc_state *context,
4162                 struct pipe_split_policy_backup *policy)
4163 {
4164         int i;
4165
4166         if (!dc->config.is_vmin_only_asic) {
4167                 policy->mpc_policy = dc->debug.pipe_split_policy;
4168                 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4169         }
4170         policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4171         dc->debug.enable_single_display_2to1_odm_policy = false;
4172         policy->subvp_policy = dc->debug.force_disable_subvp;
4173         dc->debug.force_disable_subvp = true;
4174         for (i = 0; i < context->stream_count; i++) {
4175                 policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
4176                 context->streams[i]->debug.force_odm_combine_segments = 0;
4177         }
4178 }
4179
4180 static void restore_minimal_pipe_split_policy(struct dc *dc,
4181                 struct dc_state *context,
4182                 struct pipe_split_policy_backup *policy)
4183 {
4184         uint8_t i;
4185
4186         if (!dc->config.is_vmin_only_asic)
4187                 dc->debug.pipe_split_policy = policy->mpc_policy;
4188         dc->debug.enable_single_display_2to1_odm_policy =
4189                         policy->dynamic_odm_policy;
4190         dc->debug.force_disable_subvp = policy->subvp_policy;
4191         for (i = 0; i < context->stream_count; i++)
4192                 context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
4193 }
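
/*
 * The two helpers above are meant to be used as a strict pair around a
 * minimal transition commit: back up and override the MPC split, dynamic ODM
 * and SubVP debug policies, commit, then restore. A minimal sketch, assuming
 * a hypothetical caller:
 *
 *	struct pipe_split_policy_backup policy;
 *
 *	backup_and_set_minimal_pipe_split_policy(dc, context, &policy);
 *	(validate and commit the minimal transition context here)
 *	restore_minimal_pipe_split_policy(dc, context, &policy);
 *
 * In practice the restore side is reached through
 * release_minimal_transition_state() below.
 */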
4194
4195 static void release_minimal_transition_state(struct dc *dc,
4196                 struct dc_state *minimal_transition_context,
4197                 struct dc_state *base_context,
4198                 struct pipe_split_policy_backup *policy)
4199 {
4200         restore_minimal_pipe_split_policy(dc, base_context, policy);
4201         dc_state_release(minimal_transition_context);
4202 }
4203
4204 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
4205 {
4206         uint8_t i;
4207         int j;
4208         struct dc_stream_status *stream_status;
4209
4210         for (i = 0; i < context->stream_count; i++) {
4211                 stream_status = &context->stream_status[i];
4212
4213                 for (j = 0; j < stream_status->plane_count; j++)
4214                         stream_status->plane_states[j]->flip_immediate = false;
4215         }
4216 }
4217
4218 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4219                 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4220 {
4221         struct dc_state *minimal_transition_context = NULL;
4222
4223         minimal_transition_context = dc_state_create_copy(base_context);
4224         if (!minimal_transition_context)
4225                 return NULL;
4226
4227         backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
4228         /* commit minimal state */
4229         if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4230                 /* prevent underflow and corruption when reconfiguring pipes */
4231                 force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
4232         } else {
4233                 /*
4234                  * This should never happen, minimal transition state should
4235                  * always be validated first before adding pipe split features.
4236                  */
4237                 release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
4238                 BREAK_TO_DEBUGGER();
4239                 minimal_transition_context = NULL;
4240         }
4241         return minimal_transition_context;
4242 }
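
/*
 * Lifecycle sketch for the helper above (illustrative, mirroring
 * commit_minimal_transition_state() further below):
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *transition =
 *			create_minimal_transition_state(dc, base_context, &policy);
 *
 *	if (transition) {
 *		dc_commit_state_no_check(dc, transition);
 *		release_minimal_transition_state(dc, transition,
 *				base_context, &policy);
 *	}
 *
 * On success the split/ODM/SubVP policies are left overridden, so
 * release_minimal_transition_state() must always follow to restore them.
 */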
4243
4244 static bool is_pipe_topology_transition_seamless_with_intermediate_step(
4245                 struct dc *dc,
4246                 struct dc_state *initial_state,
4247                 struct dc_state *intermediate_state,
4248                 struct dc_state *final_state)
4249 {
4250         return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
4251                         intermediate_state) &&
4252                         dc->hwss.is_pipe_topology_transition_seamless(dc,
4253                                         intermediate_state, final_state);
4254 }
4255
4256 static void swap_and_release_current_context(struct dc *dc,
4257                 struct dc_state *new_context, struct dc_stream_state *stream)
4258 {
4259
4260         int i;
4261         struct dc_state *old = dc->current_state;
4262         struct pipe_ctx *pipe_ctx;
4263
4264         /* Since freeing memory requires an elevated IRQ level, an
4265          * interrupt request is generated by the memory free. If this happens
4266          * between freeing and reassigning the context, our vsync
4267          * interrupt will call into dc and cause memory
4268          * corruption. Hence, we first reassign the context,
4269          * then free the old context.
4270          */
4271         dc->current_state = new_context;
4272         dc_state_release(old);
4273
4274         // clear any forced full updates
4275         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4276                 pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
4277
4278                 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4279                         pipe_ctx->plane_state->force_full_update = false;
4280         }
4281 }
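
/*
 * Ordering sketch for the helper above (descriptive only): the
 * reassign-then-release order is what keeps the vsync interrupt safe.
 *
 *	dc->current_state = new_context;	publish the new context first
 *	dc_state_release(old);			only then drop the old one
 *
 * Releasing first would leave a window in which an interrupt could
 * dereference freed memory through dc->current_state.
 */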
4282
4283 static int initialize_empty_surface_updates(
4284                 struct dc_stream_state *stream,
4285                 struct dc_surface_update *srf_updates)
4286 {
4287         struct dc_stream_status *status = dc_stream_get_status(stream);
4288         int i;
4289
4290         if (!status)
4291                 return 0;
4292
4293         for (i = 0; i < status->plane_count; i++)
4294                 srf_updates[i].surface = status->plane_states[i];
4295
4296         return status->plane_count;
4297 }
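
/*
 * Usage sketch (hypothetical caller): building a surface update array that
 * simply re-submits every plane currently attached to a stream, as done in
 * commit_minimal_transition_based_on_current_context() below.
 *
 *	struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
 *	int surface_count;
 *
 *	surface_count = initialize_empty_surface_updates(stream, srf_updates);
 *	commit_planes_for_stream(dc, srf_updates, surface_count, stream,
 *			NULL, UPDATE_TYPE_FULL, context);
 */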
4298
4299 static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
4300                 struct dc_state *new_context,
4301                 struct dc_stream_state *stream,
4302                 struct dc_surface_update *srf_updates,
4303                 int surface_count)
4304 {
4305         bool success = false;
4306         struct pipe_split_policy_backup policy;
4307         struct dc_state *intermediate_context =
4308                         create_minimal_transition_state(dc, new_context,
4309                                         &policy);
4310
4311         if (intermediate_context) {
4312                 if (is_pipe_topology_transition_seamless_with_intermediate_step(
4313                                 dc,
4314                                 dc->current_state,
4315                                 intermediate_context,
4316                                 new_context)) {
4317                         DC_LOG_DC("commit minimal transition state: base = new state\n");
4318                         commit_planes_for_stream(dc, srf_updates,
4319                                         surface_count, stream, NULL,
4320                                         UPDATE_TYPE_FULL, intermediate_context);
4321                         swap_and_release_current_context(
4322                                         dc, intermediate_context, stream);
4323                         dc_state_retain(dc->current_state);
4324                         success = true;
4325                 }
4326                 release_minimal_transition_state(
4327                                 dc, intermediate_context, new_context, &policy);
4328         }
4329         return success;
4330 }
4331
4332 static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
4333                 struct dc_state *new_context, struct dc_stream_state *stream)
4334 {
4335         bool success = false;
4336         struct pipe_split_policy_backup policy;
4337         struct dc_state *intermediate_context;
4338         struct dc_state *old_current_state = dc->current_state;
4339         struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
4340         int surface_count;
4341
4342         /*
4343          * Both the current and new contexts share the same stream and plane
4344          * state pointers. When the new context is validated, the stream and
4345          * planes get populated with new updates such as new plane addresses.
4346          * This makes the current context no longer valid because the stream
4347          * and planes are modified from the original. We back up the current
4348          * stream and plane states into scratch space whenever we populate a
4349          * new context, so we can restore the original values by calling the
4350          * restore function now. This restores the original stream and plane
4351          * states associated with the current state.
4352          */
4353         restore_planes_and_stream_state(&dc->scratch.current_state, stream);
4354         dc_state_retain(old_current_state);
4355         intermediate_context = create_minimal_transition_state(dc,
4356                         old_current_state, &policy);
4357
4358         if (intermediate_context) {
4359                 if (is_pipe_topology_transition_seamless_with_intermediate_step(
4360                                 dc,
4361                                 dc->current_state,
4362                                 intermediate_context,
4363                                 new_context)) {
4364                         DC_LOG_DC("commit minimal transition state: base = current state\n");
4365                         surface_count = initialize_empty_surface_updates(
4366                                         stream, srf_updates);
4367                         commit_planes_for_stream(dc, srf_updates,
4368                                         surface_count, stream, NULL,
4369                                         UPDATE_TYPE_FULL, intermediate_context);
4370                         swap_and_release_current_context(
4371                                         dc, intermediate_context, stream);
4372                         dc_state_retain(dc->current_state);
4373                         success = true;
4374                 }
4375                 release_minimal_transition_state(dc, intermediate_context,
4376                                 old_current_state, &policy);
4377         }
4378         dc_state_release(old_current_state);
4379         /*
4380          * Restore stream and plane states back to the values associated with
4381          * new context.
4382          */
4383         restore_planes_and_stream_state(&dc->scratch.new_state, stream);
4384         return success;
4385 }
4386
4387 /**
4388  * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
4389  * on current or new context
4390  *
4391  * @dc: DC structure, used to get the current state
4392  * @new_context: New context
4393  * @stream: Stream getting the update for the flip
4394  * @srf_updates: Surface updates
4395  * @surface_count: Number of surfaces
4396  *
4397  * The function takes in the current state and the new state and determines a
4398  * minimal transition state as the intermediate step which could make the
4399  * transition between the current and new states seamless. If found, it commits
4400  * the minimal transition state and updates the current state to this minimal
4401  * transition state and returns true; if not, it returns false.
4402  *
4403  * Return:
4404  * Return True if the minimal transition succeeded, false otherwise
4405  */
4406 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
4407                 struct dc_state *new_context,
4408                 struct dc_stream_state *stream,
4409                 struct dc_surface_update *srf_updates,
4410                 int surface_count)
4411 {
4412         bool success = commit_minimal_transition_based_on_new_context(
4413                                 dc, new_context, stream, srf_updates,
4414                                 surface_count);
4415         if (!success)
4416                 success = commit_minimal_transition_based_on_current_context(dc,
4417                                 new_context, stream);
4418         if (!success)
4419                 DC_LOG_ERROR("Failed to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
4420         return success;
4421 }
4422
4423 /**
4424  * commit_minimal_transition_state - Create a transition pipe split state
4425  *
4426  * @dc: Used to get the current state status
4427  * @transition_base_context: New transition state
4428  *
4429  * In some specific configurations, such as pipe split on multi-display with
4430  * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4431  * programming when moving to new planes. To mitigate those types of problems,
4432  * this function adds a transition state that minimizes pipe usage before
4433  * programming the new configuration. When adding a new plane, the current
4434  * state requires the least pipes, so it is applied without splitting. When
4435  * removing a plane, the new state requires the least pipes, so it is applied
4436  * without splitting.
4437  *
4438  * Return:
4439  * Return false if something is wrong in the transition state.
4440  */
4441 static bool commit_minimal_transition_state(struct dc *dc,
4442                 struct dc_state *transition_base_context)
4443 {
4444         struct dc_state *transition_context;
4445         struct pipe_split_policy_backup policy;
4446         enum dc_status ret = DC_ERROR_UNEXPECTED;
4447         unsigned int i, j;
4448         unsigned int pipe_in_use = 0;
4449         bool subvp_in_use = false;
4450         bool odm_in_use = false;
4451
4452         /* check current pipes in use */
4453         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4454                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4455
4456                 if (pipe->plane_state)
4457                         pipe_in_use++;
4458         }
4459
4460         /* If SubVP is enabled and we are adding or removing planes from any main subvp
4461          * pipe, we must use the minimal transition.
4462          */
4463         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4464                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4465
4466                 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4467                         subvp_in_use = true;
4468                         break;
4469                 }
4470         }
4471
4472         /* If ODM is enabled and we are adding or removing planes from any ODM
4473          * pipe, we must use the minimal transition.
4474          */
4475         for (i = 0; i < dc->res_pool->pipe_count; i++) {
4476                 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4477
4478                 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4479                         odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4480                         break;
4481                 }
4482         }
4483
4484         /* When the OS adds a new surface while all pipes are already in use by the
4485          * ODM combine or MPC split features, commit_minimal_transition_state must be
4486          * used to transition safely; after the OS exits MPO and goes back to using
4487          * ODM/MPC split on all pipes, call it again. Otherwise, return true to skip.
4488          *
4489          * This reduces the scenarios that use dc_commit_state_no_check at flip time,
4490          * especially when entering/exiting MPO while DCN still has enough resources.
4491          */
4492         if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4493                 return true;
4494
4495         DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4496                         dc->current_state == transition_base_context ? "current" : "new",
4497                         subvp_in_use ? "Subvp In Use" :
4498                         odm_in_use ? "ODM in Use" :
4499                         dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4500                         "Unknown");
4501
4502         dc_state_retain(transition_base_context);
4503         transition_context = create_minimal_transition_state(dc,
4504                         transition_base_context, &policy);
4505         if (transition_context) {
4506                 ret = dc_commit_state_no_check(dc, transition_context);
4507                 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
4508         }
4509         dc_state_release(transition_base_context);
4510
4511         if (ret != DC_OK) {
4512                 /* this should never happen */
4513                 BREAK_TO_DEBUGGER();
4514                 return false;
4515         }
4516
4517         /* force full surface update */
4518         for (i = 0; i < dc->current_state->stream_count; i++) {
4519                 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4520                         dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4521                 }
4522         }
4523
4524         return true;
4525 }
4526
4527 static void populate_fast_updates(struct dc_fast_update *fast_update,
4528                 struct dc_surface_update *srf_updates,
4529                 int surface_count,
4530                 struct dc_stream_update *stream_update)
4531 {
4532         int i = 0;
4533
4534         if (stream_update) {
4535                 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4536                 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4537         }
4538
4539         for (i = 0; i < surface_count; i++) {
4540                 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4541                 fast_update[i].gamma = srf_updates[i].gamma;
4542                 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4543                 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4544                 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4545         }
4546 }
4547
4548 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4549 {
4550         int i;
4551
4552         if (fast_update[0].out_transfer_func ||
4553                 fast_update[0].output_csc_transform)
4554                 return true;
4555
4556         for (i = 0; i < surface_count; i++) {
4557                 if (fast_update[i].flip_addr ||
4558                                 fast_update[i].gamma ||
4559                                 fast_update[i].gamut_remap_matrix ||
4560                                 fast_update[i].input_csc_color_matrix ||
4561                                 fast_update[i].coeff_reduction_factor)
4562                         return true;
4563         }
4564
4565         return false;
4566 }
4567
4568 static bool full_update_required(struct dc *dc,
4569                 struct dc_surface_update *srf_updates,
4570                 int surface_count,
4571                 struct dc_stream_update *stream_update,
4572                 struct dc_stream_state *stream)
4573 {
4574
4575         int i;
4576         struct dc_stream_status *stream_status;
4577         const struct dc_state *context = dc->current_state;
4578
4579         for (i = 0; i < surface_count; i++) {
4580                 if (srf_updates &&
4581                                 (srf_updates[i].plane_info ||
4582                                 srf_updates[i].scaling_info ||
4583                                 (srf_updates[i].hdr_mult.value &&
4584                                 srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
4585                                 srf_updates[i].in_transfer_func ||
4586                                 srf_updates[i].func_shaper ||
4587                                 srf_updates[i].lut3d_func ||
4588                                 srf_updates[i].surface->force_full_update ||
4589                                 (srf_updates[i].flip_addr &&
4590                                 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4591                                 !is_surface_in_context(context, srf_updates[i].surface)))
4592                         return true;
4593         }
4594
4595         if (stream_update &&
4596                         (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4597                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4598                         stream_update->integer_scaling_update) ||
4599                         stream_update->hdr_static_metadata ||
4600                         stream_update->abm_level ||
4601                         stream_update->periodic_interrupt ||
4602                         stream_update->vrr_infopacket ||
4603                         stream_update->vsc_infopacket ||
4604                         stream_update->vsp_infopacket ||
4605                         stream_update->hfvsif_infopacket ||
4606                         stream_update->vtem_infopacket ||
4607                         stream_update->adaptive_sync_infopacket ||
4608                         stream_update->dpms_off ||
4609                         stream_update->allow_freesync ||
4610                         stream_update->vrr_active_variable ||
4611                         stream_update->vrr_active_fixed ||
4612                         stream_update->gamut_remap ||
4613                         stream_update->output_color_space ||
4614                         stream_update->dither_option ||
4615                         stream_update->wb_update ||
4616                         stream_update->dsc_config ||
4617                         stream_update->mst_bw_update ||
4618                         stream_update->func_shaper ||
4619                         stream_update->lut3d_func ||
4620                         stream_update->pending_test_pattern ||
4621                         stream_update->crtc_timing_adjust))
4622                 return true;
4623
4624         if (stream) {
4625                 stream_status = dc_stream_get_status(stream);
4626                 if (stream_status == NULL || stream_status->plane_count != surface_count)
4627                         return true;
4628         }
4629         if (dc->idle_optimizations_allowed)
4630                 return true;
4631
4632         return false;
4633 }
4634
4635 static bool fast_update_only(struct dc *dc,
4636                 struct dc_fast_update *fast_update,
4637                 struct dc_surface_update *srf_updates,
4638                 int surface_count,
4639                 struct dc_stream_update *stream_update,
4640                 struct dc_stream_state *stream)
4641 {
4642         return fast_updates_exist(fast_update, surface_count)
4643                         && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4644 }
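
/*
 * Classification sketch (illustrative, mirroring the callers below): a commit
 * qualifies for the fast path only when fast-updatable attributes exist and
 * nothing in the update forces a full reprogram.
 *
 *	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 *
 *	populate_fast_updates(fast_update, srf_updates, surface_count,
 *			stream_update);
 *	if (fast_update_only(dc, fast_update, srf_updates, surface_count,
 *			stream_update, stream) &&
 *			!dc->debug.enable_legacy_fast_update)
 *		commit_planes_for_stream_fast(dc, srf_updates, surface_count,
 *				stream, stream_update, update_type, context);
 *	else
 *		commit_planes_for_stream(dc, srf_updates, surface_count,
 *				stream, stream_update, update_type, context);
 */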
4645
4646 static bool update_planes_and_stream_v1(struct dc *dc,
4647                 struct dc_surface_update *srf_updates, int surface_count,
4648                 struct dc_stream_state *stream,
4649                 struct dc_stream_update *stream_update,
4650                 struct dc_state *state)
4651 {
4652         const struct dc_stream_status *stream_status;
4653         enum surface_update_type update_type;
4654         struct dc_state *context;
4655         struct dc_context *dc_ctx = dc->ctx;
4656         int i, j;
4657         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4658
4659         dc_exit_ips_for_hw_access(dc);
4660
4661         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4662         stream_status = dc_stream_get_status(stream);
4663         context = dc->current_state;
4664
4665         update_type = dc_check_update_surfaces_for_stream(
4666                                 dc, srf_updates, surface_count, stream_update, stream_status);
4667
4668         if (update_type >= UPDATE_TYPE_FULL) {
4669
4670                 /* initialize scratch memory for building context */
4671                 context = dc_state_create_copy(state);
4672                 if (context == NULL) {
4673                         DC_ERROR("Failed to allocate new validate context!\n");
4674                         return false;
4675                 }
4676
4677                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4678                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4679                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4680
4681                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4682                                 new_pipe->plane_state->force_full_update = true;
4683                 }
4684         } else if (update_type == UPDATE_TYPE_FAST) {
4685                 /*
4686                  * Previous frame finished and HW is ready for optimization.
4687                  */
4688                 dc_post_update_surfaces_to_stream(dc);
4689         }
4690
4691         for (i = 0; i < surface_count; i++) {
4692                 struct dc_plane_state *surface = srf_updates[i].surface;
4693
4694                 copy_surface_update_to_plane(surface, &srf_updates[i]);
4695
4696                 if (update_type >= UPDATE_TYPE_MED) {
4697                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
4698                                 struct pipe_ctx *pipe_ctx =
4699                                         &context->res_ctx.pipe_ctx[j];
4700
4701                                 if (pipe_ctx->plane_state != surface)
4702                                         continue;
4703
4704                                 resource_build_scaling_params(pipe_ctx);
4705                         }
4706                 }
4707         }
4708
4709         copy_stream_update_to_stream(dc, context, stream, stream_update);
4710
4711         if (update_type >= UPDATE_TYPE_FULL) {
4712                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4713                         DC_ERROR("Mode validation failed for stream update!\n");
4714                         dc_state_release(context);
4715                         return false;
4716                 }
4717         }
4718
4719         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4720
4721         if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4722                         !dc->debug.enable_legacy_fast_update) {
4723                 commit_planes_for_stream_fast(dc,
4724                                 srf_updates,
4725                                 surface_count,
4726                                 stream,
4727                                 stream_update,
4728                                 update_type,
4729                                 context);
4730         } else {
4731                 commit_planes_for_stream(
4732                                 dc,
4733                                 srf_updates,
4734                                 surface_count,
4735                                 stream,
4736                                 stream_update,
4737                                 update_type,
4738                                 context);
4739         }
4740         /* update current_state */
4741         if (dc->current_state != context) {
4742
4743                 struct dc_state *old = dc->current_state;
4744
4745                 dc->current_state = context;
4746                 dc_state_release(old);
4747
4748                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4749                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4750
4751                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4752                                 pipe_ctx->plane_state->force_full_update = false;
4753                 }
4754         }
4755
4756         /* Legacy optimization path for DCE. */
4757         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4758                 dc_post_update_surfaces_to_stream(dc);
4759                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4760         }
4761         return true;
4762 }
4763
4764 static bool update_planes_and_stream_v2(struct dc *dc,
4765                 struct dc_surface_update *srf_updates, int surface_count,
4766                 struct dc_stream_state *stream,
4767                 struct dc_stream_update *stream_update)
4768 {
4769         struct dc_state *context;
4770         enum surface_update_type update_type;
4771         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4772
4773         /* In cases where MPO and split or ODM are used, transitions can
4774          * cause underflow. Apply the stream configuration with minimal pipe
4775          * split first to avoid unsupported transitions for active pipes.
4776          */
4777         bool force_minimal_pipe_splitting = false;
4778         bool is_plane_addition = false;
4779         bool is_fast_update_only;
4780
4781         populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4782         is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4783                         surface_count, stream_update, stream);
4784         force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4785                         dc,
4786                         stream,
4787                         srf_updates,
4788                         surface_count,
4789                         &is_plane_addition);
4790
4791         /* on plane addition, minimal state is the current one */
4792         if (force_minimal_pipe_splitting && is_plane_addition &&
4793                 !commit_minimal_transition_state(dc, dc->current_state))
4794                 return false;
4795
4796         if (!update_planes_and_stream_state(
4797                         dc,
4798                         srf_updates,
4799                         surface_count,
4800                         stream,
4801                         stream_update,
4802                         &update_type,
4803                         &context))
4804                 return false;
4805
4806         /* on plane removal, minimal state is the new one */
4807         if (force_minimal_pipe_splitting && !is_plane_addition) {
4808                 if (!commit_minimal_transition_state(dc, context)) {
4809                         dc_state_release(context);
4810                         return false;
4811                 }
4812                 update_type = UPDATE_TYPE_FULL;
4813         }
4814
4815         if (dc->hwss.is_pipe_topology_transition_seamless &&
4816                         !dc->hwss.is_pipe_topology_transition_seamless(
4817                                         dc, dc->current_state, context))
4818                 commit_minimal_transition_state_in_dc_update(dc, context, stream,
4819                                 srf_updates, surface_count);
4820
4821         if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4822                 commit_planes_for_stream_fast(dc,
4823                                 srf_updates,
4824                                 surface_count,
4825                                 stream,
4826                                 stream_update,
4827                                 update_type,
4828                                 context);
4829         } else {
4830                 if (!stream_update &&
4831                                 dc->hwss.is_pipe_topology_transition_seamless &&
4832                                 !dc->hwss.is_pipe_topology_transition_seamless(
4833                                                 dc, dc->current_state, context)) {
4834                         DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4835                         BREAK_TO_DEBUGGER();
4836                 }
4837                 commit_planes_for_stream(
4838                                 dc,
4839                                 srf_updates,
4840                                 surface_count,
4841                                 stream,
4842                                 stream_update,
4843                                 update_type,
4844                                 context);
4845         }
4846         if (dc->current_state != context)
4847                 swap_and_release_current_context(dc, context, stream);
4848         return true;
4849 }
4850
4851 static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
4852                 struct dc_surface_update *srf_updates, int surface_count,
4853                 struct dc_stream_state *stream,
4854                 struct dc_stream_update *stream_update,
4855                 enum surface_update_type update_type)
4856 {
4857         struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4858
4859         ASSERT(update_type < UPDATE_TYPE_FULL);
4860         populate_fast_updates(fast_update, srf_updates, surface_count,
4861                         stream_update);
4862         if (fast_update_only(dc, fast_update, srf_updates, surface_count,
4863                         stream_update, stream) &&
4864                         !dc->debug.enable_legacy_fast_update)
4865                 commit_planes_for_stream_fast(dc,
4866                                 srf_updates,
4867                                 surface_count,
4868                                 stream,
4869                                 stream_update,
4870                                 update_type,
4871                                 dc->current_state);
4872         else
4873                 commit_planes_for_stream(
4874                                 dc,
4875                                 srf_updates,
4876                                 surface_count,
4877                                 stream,
4878                                 stream_update,
4879                                 update_type,
4880                                 dc->current_state);
4881 }
4882
4883 static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
4884                 struct dc_surface_update *srf_updates, int surface_count,
4885                 struct dc_stream_state *stream,
4886                 struct dc_stream_update *stream_update,
4887                 enum surface_update_type update_type,
4888                 struct dc_state *new_context)
4889 {
4890         ASSERT(update_type >= UPDATE_TYPE_FULL);
4891         if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
4892                         dc->current_state, new_context))
4893                 /*
4894                  * It is required by the feature design that any pipe topology
4895                  * using extra free pipes for power saving purposes, such as
4896                  * dynamic ODM or SubVP, shall only be enabled when it can be
4897                  * transitioned seamlessly to AND from its minimal transition
4898                  * state. A minimal transition state is defined as the same dc
4899                  * state but with all power saving features disabled, so it uses
4900                  * the minimum pipe topology. When we can't seamlessly
4901                  * transition from state A to state B, we will insert the
4902                  * minimal transition state A' or B' in between so a seamless
4903                  * transition between A and B can be made possible.
4904                  */
4905                 commit_minimal_transition_state_in_dc_update(dc, new_context,
4906                                 stream, srf_updates, surface_count);
4907
4908         commit_planes_for_stream(
4909                         dc,
4910                         srf_updates,
4911                         surface_count,
4912                         stream,
4913                         stream_update,
4914                         update_type,
4915                         new_context);
4916 }
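
/*
 * Transition sketch (descriptive only): when state A (current) cannot become
 * state B (new) seamlessly, an intermediate minimal state is inserted:
 *
 *	A ------------> B	non-seamless, not allowed
 *	A --> A' -----> B	A' = A with power saving features disabled
 *	A -----> B' --> B	B' = B with power saving features disabled
 *
 * commit_minimal_transition_state_in_dc_update() tries B' first (based on the
 * new context) and falls back to A' (based on the current context).
 */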
4917
4918 static bool update_planes_and_stream_v3(struct dc *dc,
4919                 struct dc_surface_update *srf_updates, int surface_count,
4920                 struct dc_stream_state *stream,
4921                 struct dc_stream_update *stream_update)
4922 {
4923         struct dc_state *new_context;
4924         enum surface_update_type update_type;
4925
4926         /*
4927          * When this function returns true and new_context is not equal to
4928          * the current state, it has allocated and validated a new dc state
4929          * and assigned it to new_context. The caller is responsible for
4930          * freeing this memory when new_context is no longer used. We swap
4931          * current with the new context and release current instead, so
4932          * new_context's memory lives until the next full update, after which
4933          * it is replaced by a newer context. Refer to the use of
4934          * swap_and_release_current_context below.
4935          */
4936         if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
4937                                 stream, stream_update, &update_type,
4938                                 &new_context))
4939                 return false;
4940
4941         if (new_context == dc->current_state) {
4942                 commit_planes_and_stream_update_on_current_context(dc,
4943                                 srf_updates, surface_count, stream,
4944                                 stream_update, update_type);
4945         } else {
4946                 commit_planes_and_stream_update_with_new_context(dc,
4947                                 srf_updates, surface_count, stream,
4948                                 stream_update, update_type, new_context);
4949                 swap_and_release_current_context(dc, new_context, stream);
4950         }
4951
4952         return true;
4953 }
4954
4955 bool dc_update_planes_and_stream(struct dc *dc,
4956                 struct dc_surface_update *srf_updates, int surface_count,
4957                 struct dc_stream_state *stream,
4958                 struct dc_stream_update *stream_update)
4959 {
4960         dc_exit_ips_for_hw_access(dc);
4961         /*
4962          * update planes and stream version 3 separates FULL and FAST updates
4963          * into their own sequences. It aims to clean up frequent checks for
4964          * update type, which result in unnecessary branching in the logic flow.
4965          * It also adds a new commit minimal transition sequence, which detects
4966          * the need for a minimal transition based on an actual comparison of the
4967          * current and new states instead of "predicting" it based on per-feature
4968          * software policy, i.e. could_mpcc_tree_change_for_active_pipes. The new
4969          * commit minimal transition sequence is made universal to any power
4970          * saving features that would use extra free pipes, such as Dynamic
4971          * ODM/MPC Combine, MPO or SubVP. Therefore there is no longer a need to
4972          * specially handle compatibility problems with transitions among those
4973          * features, as they are now transparent to the new sequence.
4974          */
4975         if (dc->ctx->dce_version > DCN_VERSION_3_51)
4976                 return update_planes_and_stream_v3(dc, srf_updates,
4977                                 surface_count, stream, stream_update);
4978         return update_planes_and_stream_v2(dc, srf_updates,
4979                         surface_count, stream, stream_update);
4980 }
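
/*
 * Usage sketch (illustrative only, not part of the driver): a DM that has
 * built per-surface updates for a flip calls the entry point above once
 * per stream. The helper name dm_flip_stream() is hypothetical.
 *
 *      static void dm_flip_stream(struct dc *dc, struct dc_stream_state *stream,
 *                                 struct dc_surface_update *updates, int count)
 *      {
 *              struct dc_stream_update stream_update = {0};
 *
 *              if (!dc_update_planes_and_stream(dc, updates, count,
 *                                               stream, &stream_update))
 *                      DC_LOG_WARNING("update failed on stream %p\n", stream);
 *      }
 */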
4981
4982 void dc_commit_updates_for_stream(struct dc *dc,
4983                 struct dc_surface_update *srf_updates,
4984                 int surface_count,
4985                 struct dc_stream_state *stream,
4986                 struct dc_stream_update *stream_update,
4987                 struct dc_state *state)
4988 {
4989         dc_exit_ips_for_hw_access(dc);
4990         /* TODO: Since changing the commit sequence can have a huge impact,
4991          * we decided to only enable it for DCN3x. However, as soon as
4992          * we get more confident about this change, we'll need to enable
4993          * the new sequence for all ASICs.
4994          */
4995         if (dc->ctx->dce_version > DCN_VERSION_3_51) {
4996                 update_planes_and_stream_v3(dc, srf_updates, surface_count,
4997                                 stream, stream_update);
4998                 return;
4999         }
5000         if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
5001                 update_planes_and_stream_v2(dc, srf_updates, surface_count,
5002                                 stream, stream_update);
5003                 return;
5004         }
5005         update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
5006                         stream_update, state);
5007 }
5008
5009 uint8_t dc_get_current_stream_count(struct dc *dc)
5010 {
5011         return dc->current_state->stream_count;
5012 }
5013
5014 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
5015 {
5016         if (i < dc->current_state->stream_count)
5017                 return dc->current_state->streams[i];
5018         return NULL;
5019 }
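
/*
 * Example (illustrative): the two accessors above are typically used
 * together to walk the streams of the current state.
 *
 *      uint8_t i;
 *
 *      for (i = 0; i < dc_get_current_stream_count(dc); i++) {
 *              struct dc_stream_state *stream = dc_get_stream_at_index(dc, i);
 *
 *              if (stream)
 *                      handle_stream(stream); // hypothetical DM helper
 *      }
 */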
5020
5021 enum dc_irq_source dc_interrupt_to_irq_source(
5022                 struct dc *dc,
5023                 uint32_t src_id,
5024                 uint32_t ext_id)
5025 {
5026         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
5027 }
5028
5029 /*
5030  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
5031  */
5032 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
5033 {
5034
5035         if (dc == NULL)
5036                 return false;
5037
5038         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
5039 }
5040
5041 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
5042 {
5043         dal_irq_service_ack(dc->res_pool->irqs, src);
5044 }
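
/*
 * Typical flow (illustrative sketch): a DM interrupt handler maps the raw
 * src_id/ext_id pair to a dc_irq_source, acks it, and toggles it as needed.
 *
 *      enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *      if (src != DC_IRQ_SOURCE_INVALID) {
 *              dc_interrupt_ack(dc, src);
 *              dc_interrupt_set(dc, src, true); // keep the source enabled
 *      }
 */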
5045
5046 void dc_power_down_on_boot(struct dc *dc)
5047 {
5048         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
5049                         dc->hwss.power_down_on_boot) {
5050
5051                 if (dc->caps.ips_support)
5052                         dc_exit_ips_for_hw_access(dc);
5053
5054                 dc->hwss.power_down_on_boot(dc);
5055         }
5056 }
5057
5058 void dc_set_power_state(
5059         struct dc *dc,
5060         enum dc_acpi_cm_power_state power_state)
5061 {
5062         if (!dc->current_state)
5063                 return;
5064
5065         switch (power_state) {
5066         case DC_ACPI_CM_POWER_STATE_D0:
5067                 dc_state_construct(dc, dc->current_state);
5068
5069                 dc_exit_ips_for_hw_access(dc);
5070
5071                 dc_z10_restore(dc);
5072
5073                 dc->hwss.init_hw(dc);
5074
5075                 if (dc->hwss.init_sys_ctx != NULL &&
5076                         dc->vm_pa_config.valid) {
5077                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
5078                 }
5079
5080                 break;
5081         default:
5082                 ASSERT(dc->current_state->stream_count == 0);
5083
5084                 dc_state_destruct(dc->current_state);
5085
5086                 break;
5087         }
5088 }
5089
5090 void dc_resume(struct dc *dc)
5091 {
5092         uint32_t i;
5093
5094         for (i = 0; i < dc->link_count; i++)
5095                 dc->link_srv->resume(dc->links[i]);
5096 }
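
/*
 * Suspend/resume sketch (illustrative): on suspend the DM tears down all
 * streams first (so the stream_count == 0 assertion above holds) and enters
 * D3; on resume it re-initializes hardware state in D0, then resumes links.
 *
 *      // suspend path
 *      dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *      // resume path
 *      dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *      dc_resume(dc);
 */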
5097
5098 bool dc_is_dmcu_initialized(struct dc *dc)
5099 {
5100         struct dmcu *dmcu = dc->res_pool->dmcu;
5101
5102         if (dmcu)
5103                 return dmcu->funcs->is_dmcu_initialized(dmcu);
5104         return false;
5105 }
5106
5107 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
5108 {
5109         info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
5110         info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
5111         info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
5112         info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
5113         info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
5114         info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
5115         info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
5116         info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
5117         info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
5118 }
5119 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
5120 {
5121         if (dc->hwss.set_clock)
5122                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
5123         return DC_ERROR_UNEXPECTED;
5124 }
5125 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
5126 {
5127         if (dc->hwss.get_clock)
5128                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
5129 }
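
/*
 * Example (illustrative sketch): query then request a clock through the
 * optional hwss hooks above; ASICs without the hooks fall back to
 * DC_ERROR_UNEXPECTED or leave clock_cfg untouched. clk_khz is a
 * caller-chosen frequency.
 *
 *      struct dc_clock_config cfg = {0};
 *
 *      dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);
 *      if (dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK, clk_khz, 0) != DC_OK)
 *              ; // set_clock hook not implemented on this ASIC
 */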
5130
5131 /* enable/disable eDP PSR without specifying a stream for eDP */
5132 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
5133 {
5134         int i;
5135         bool allow_active;
5136
5137         for (i = 0; i < dc->current_state->stream_count ; i++) {
5138                 struct dc_link *link;
5139                 struct dc_stream_state *stream = dc->current_state->streams[i];
5140
5141                 link = stream->link;
5142                 if (!link)
5143                         continue;
5144
5145                 if (link->psr_settings.psr_feature_enabled) {
5146                         if (enable && !link->psr_settings.psr_allow_active) {
5147                                 allow_active = true;
5148                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
5149                                         return false;
5150                         } else if (!enable && link->psr_settings.psr_allow_active) {
5151                                 allow_active = false;
5152                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
5153                                         return false;
5154                         }
5155                 }
5156         }
5157
5158         return true;
5159 }
5160
5161 /* enable/disable eDP Replay without specifying a stream for eDP */
5162 bool dc_set_replay_allow_active(struct dc *dc, bool active)
5163 {
5164         int i;
5165         bool allow_active;
5166
5167         for (i = 0; i < dc->current_state->stream_count; i++) {
5168                 struct dc_link *link;
5169                 struct dc_stream_state *stream = dc->current_state->streams[i];
5170
5171                 link = stream->link;
5172                 if (!link)
5173                         continue;
5174
5175                 if (link->replay_settings.replay_feature_enabled) {
5176                         if (active && !link->replay_settings.replay_allow_active) {
5177                                 allow_active = true;
5178                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
5179                                         false, false, NULL))
5180                                         return false;
5181                         } else if (!active && link->replay_settings.replay_allow_active) {
5182                                 allow_active = false;
5183                                 if (!dc_link_set_replay_allow_active(link, &allow_active,
5184                                         true, false, NULL))
5185                                         return false;
5186                         }
5187                 }
5188         }
5189
5190         return true;
5191 }
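
/*
 * Example (illustrative): because both helpers above iterate every stream
 * in the current state, a DM can gate panel self-refresh features globally
 * around work that must keep the display pipe actively fetching:
 *
 *      dc_set_psr_allow_active(dc, false);
 *      dc_set_replay_allow_active(dc, false);
 *      // ... latency/timing sensitive operation ...
 *      dc_set_psr_allow_active(dc, true);
 *      dc_set_replay_allow_active(dc, true);
 */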
5192
5193 void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
5194 {
5195         if (dc->debug.disable_idle_power_optimizations)
5196                 return;
5197
5198         if (allow != dc->idle_optimizations_allowed)
5199                 DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
5200                            dc->idle_optimizations_allowed, allow, caller_name);
5201
5202         if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5203                 return;
5204
5205         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
5206                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
5207                         return;
5208
5209         if (allow == dc->idle_optimizations_allowed)
5210                 return;
5211
5212         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
5213                 dc->idle_optimizations_allowed = allow;
5214 }
5215
5216 void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
5217 {
5218         if (dc->caps.ips_support)
5219                 dc_allow_idle_optimizations_internal(dc, false, caller_name);
5220 }
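
/*
 * Note: callers normally reach the two _internal helpers above through the
 * wrapper macros in dc.h that pass __func__ as caller_name, so the IPS log
 * records who toggled idle optimizations:
 *
 *      dc_exit_ips_for_hw_access(dc);           // before touching registers
 *      dc_allow_idle_optimizations(dc, true);   // when hw access is done
 */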
5221
5222 bool dc_dmub_is_ips_idle_state(struct dc *dc)
5223 {
5224         if (dc->debug.disable_idle_power_optimizations)
5225                 return false;
5226
5227         if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5228                 return false;
5229
5230         if (!dc->ctx->dmub_srv)
5231                 return false;
5232
5233         return dc->ctx->dmub_srv->idle_allowed;
5234 }
5235
5236 /* set min and max memory clock to lowest and highest DPM level, respectively */
5237 void dc_unlock_memory_clock_frequency(struct dc *dc)
5238 {
5239         if (dc->clk_mgr->funcs->set_hard_min_memclk)
5240                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
5241
5242         if (dc->clk_mgr->funcs->set_hard_max_memclk)
5243                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
5244 }
5245
5246 /* set min memory clock to the min required for current mode, max to maxDPM */
5247 void dc_lock_memory_clock_frequency(struct dc *dc)
5248 {
5249         if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
5250                 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
5251
5252         if (dc->clk_mgr->funcs->set_hard_min_memclk)
5253                 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
5254
5255         if (dc->clk_mgr->funcs->set_hard_max_memclk)
5256                 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
5257 }
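
/*
 * Pairing sketch (illustrative): the two helpers above act as a bracket;
 * unlock to give the SMU the full DPM range, lock to pin memclk at the
 * minimum the current mode requires.
 *
 *      dc_unlock_memory_clock_frequency(dc);
 *      // ... operation that may need higher memory clock DPM levels ...
 *      dc_lock_memory_clock_frequency(dc);
 */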
5258
5259 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
5260 {
5261         struct dc_state *context = dc->current_state;
5262         struct hubp *hubp;
5263         struct pipe_ctx *pipe;
5264         int i;
5265
5266         for (i = 0; i < dc->res_pool->pipe_count; i++) {
5267                 pipe = &context->res_ctx.pipe_ctx[i];
5268
5269                 if (pipe->stream != NULL) {
5270                         dc->hwss.disable_pixel_data(dc, pipe, true);
5271
5272                         // wait for double buffer
5273                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5274                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
5275                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5276
5277                         hubp = pipe->plane_res.hubp;
5278                         hubp->funcs->set_blank_regs(hubp, true);
5279                 }
5280         }
5281
5282         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
5283         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
5284
5285         for (i = 0; i < dc->res_pool->pipe_count; i++) {
5286                 pipe = &context->res_ctx.pipe_ctx[i];
5287
5288                 if (pipe->stream != NULL) {
5289                         dc->hwss.disable_pixel_data(dc, pipe, false);
5290
5291                         hubp = pipe->plane_res.hubp;
5292                         hubp->funcs->set_blank_regs(hubp, false);
5293                 }
5294         }
5295 }
5296
5297
5298 /**
5299  * dc_enable_dcmode_clk_limit() - lower clocks in DC (battery) mode
5300  * @dc: pointer to the dc of the DM calling this
5301  * @enable: true = transition to DC mode, false = transition back to AC mode
5302  *
5303  * Some SoCs define additional clock limits when in DC mode; the DM should
5304  * invoke this function when the platform undergoes a power source transition
5305  * so DC can apply/unapply the limit. This interface may be disruptive to
5306  * the onscreen content.
5307  *
5308  * Context: Triggered by the OS through the DM interface, or manually by
5309  * escape calls. Need to hold the dc lock when doing so.
5310  *
5311  * Return: none (void function)
5312  *
5313  */
5314 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
5315 {
5316         unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
5317         bool p_state_change_support;
5318
5319         if (!dc->config.dc_mode_clk_limit_support)
5320                 return;
5321
5322         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5323         for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5324                 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5325                         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5326         }
5327         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5328         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5329
5330         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5331                 if (p_state_change_support) {
5332                         if (funcMin <= softMax)
5333                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5334                         // else: No-Op
5335                 } else {
5336                         if (funcMin <= softMax)
5337                                 blank_and_force_memclk(dc, true, softMax);
5338                         // else: No-Op
5339                 }
5340         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5341                 if (p_state_change_support) {
5342                         if (funcMin <= softMax)
5343                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5344                         // else: No-Op
5345                 } else {
5346                         if (funcMin <= softMax)
5347                                 blank_and_force_memclk(dc, true, maxDPM);
5348                         // else: No-Op
5349                 }
5350         }
5351         dc->clk_mgr->dc_mode_softmax_enabled = enable;
5352 }
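
/*
 * Example (illustrative): a DM reacting to an ACPI power-source event;
 * on_power_source_change() is a hypothetical DM callback.
 *
 *      static void on_power_source_change(struct dc *dc, bool on_battery)
 *      {
 *              dc_enable_dcmode_clk_limit(dc, on_battery);
 *      }
 */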
5353 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
5354                 unsigned int pitch,
5355                 unsigned int height,
5356                 enum surface_pixel_format format,
5357                 struct dc_cursor_attributes *cursor_attr)
5358 {
5359         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
5360                 return true;
5361         return false;
5362 }
5363
5364 /* cleanup on driver unload */
5365 void dc_hardware_release(struct dc *dc)
5366 {
5367         dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5368
5369         if (dc->hwss.hardware_release)
5370                 dc->hwss.hardware_release(dc);
5371 }
5372
5373 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5374 {
5375         if (dc->current_state)
5376                 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5377 }
5378
5379 /**
5380  * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5381  *
5382  * @dc: [in] dc structure
5383  *
5384  * Checks whether DMUB FW supports outbox notifications. If supported, the DM
5385  * should register the outbox interrupt prior to actually enabling interrupts
5386  * via dc_enable_dmub_outbox().
5387  *
5388  * Return:
5389  * True if DMUB FW supports outbox notifications, False otherwise
5390  */
5391 bool dc_is_dmub_outbox_supported(struct dc *dc)
5392 {
5393         switch (dc->ctx->asic_id.chip_family) {
5394
5395         case FAMILY_YELLOW_CARP:
5396                 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5397                 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5398                     !dc->debug.dpia_debug.bits.disable_dpia)
5399                         return true;
5400                 break;
5401
5402         case AMDGPU_FAMILY_GC_11_0_1:
5403         case AMDGPU_FAMILY_GC_11_5_0:
5404                 if (!dc->debug.dpia_debug.bits.disable_dpia)
5405                         return true;
5406                 break;
5407
5408         default:
5409                 break;
5410         }
5411
5412         /* dmub aux needs dmub notifications to be enabled */
5413         return dc->debug.enable_dmub_aux_for_legacy_ddc;
5414
5415 }
5416
5417 /**
5418  * dc_enable_dmub_notifications - Check if dmub fw supports outbox
5419  *
5420  * @dc: [in] dc structure
5421  *
5422  * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5423  * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
5424  * API shall be removed after switching.
5425  *
5426  * Return:
5427  * True if DMUB FW supports outbox notifications, False otherwise
5428  */
5429 bool dc_enable_dmub_notifications(struct dc *dc)
5430 {
5431         return dc_is_dmub_outbox_supported(dc);
5432 }
5433
5434 /**
5435  * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5436  *
5437  * @dc: [in] dc structure
5438  *
5439  * Enables DMUB unsolicited notifications to x86 via outbox.
5440  */
5441 void dc_enable_dmub_outbox(struct dc *dc)
5442 {
5443         struct dc_context *dc_ctx = dc->ctx;
5444
5445         dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5446         DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5447 }
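
/*
 * Boot-time flow (illustrative sketch): per the contract documented above,
 * the DM checks for outbox support, registers its outbox interrupt, and
 * only then enables notifications. register_outbox_irq() is hypothetical
 * DM-side plumbing.
 *
 *      if (dc_is_dmub_outbox_supported(dc)) {
 *              register_outbox_irq(adev);
 *              dc_enable_dmub_outbox(dc);
 *      }
 */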
5448
5449 /**
5450  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5451  *                                      Sets port index appropriately for legacy DDC
5452  * @dc: dc structure
5453  * @link_index: link index
5454  * @payload: aux payload
5455  *
5456  * Returns: True if successful, False if failure
5457  */
5458 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5459                                 uint32_t link_index,
5460                                 struct aux_payload *payload)
5461 {
5462         uint8_t action;
5463         union dmub_rb_cmd cmd = {0};
5464
5465         ASSERT(payload->length <= 16);
5466
5467         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5468         cmd.dp_aux_access.header.payload_bytes = 0;
5469         /* For dpia, ddc_pin is set to NULL */
5470         if (!dc->links[link_index]->ddc->ddc_pin)
5471                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5472         else
5473                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5474
5475         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5476         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5477         cmd.dp_aux_access.aux_control.timeout = 0;
5478         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5479         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5480         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5481
5482         /* set aux action */
5483         if (payload->i2c_over_aux) {
5484                 if (payload->write) {
5485                         if (payload->mot)
5486                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5487                         else
5488                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5489                 } else {
5490                         if (payload->mot)
5491                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5492                         else
5493                                 action = DP_AUX_REQ_ACTION_I2C_READ;
5494                 }
5495         } else {
5496                 if (payload->write)
5497                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5498                 else
5499                         action = DP_AUX_REQ_ACTION_DPCD_READ;
5500         }
5501
5502         cmd.dp_aux_access.aux_control.dpaux.action = action;
5503
5504         if (payload->length && payload->write) {
5505                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5506                         payload->data,
5507                         payload->length
5508                         );
5509         }
5510
5511         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5512
5513         return true;
5514 }
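
/*
 * Example (illustrative sketch): a DPCD read of up to 16 bytes submitted
 * through the DMUB inbox. The reply arrives asynchronously via the outbox;
 * a return of true only means the command was queued. Field usage follows
 * struct aux_payload as used above.
 *
 *      struct aux_payload payload = {0};
 *      uint8_t data[16];
 *
 *      payload.address = DP_DPCD_REV;
 *      payload.length = sizeof(data);
 *      payload.data = data;
 *      payload.write = false;
 *      payload.i2c_over_aux = false;
 *
 *      dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */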
5515
5516 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5517                                             uint8_t dpia_port_index)
5518 {
5519         uint8_t index, link_index = 0xFF;
5520
5521         for (index = 0; index < dc->link_count; index++) {
5522                 /* ddc_hw_inst has dpia port index for dpia links
5523                  * and ddc instance for legacy links
5524                  */
5525                 if (!dc->links[index]->ddc->ddc_pin) {
5526                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5527                                 link_index = index;
5528                                 break;
5529                         }
5530                 }
5531         }
5532         ASSERT(link_index != 0xFF);
5533         return link_index;
5534 }
5535
5536 /**
5537  * dc_process_dmub_set_config_async - Submits set_config command
5538  *
5539  * @dc: [in] dc structure
5540  * @link_index: [in] link index
5541  * @payload: [in] aux payload
5542  * @notify: [out] set_config immediate reply
5543  *
5544  * Submits set_config command to dmub via inbox message.
5545  *
5546  * Return:
5547  * True if successful, False if failure
5548  */
5549 bool dc_process_dmub_set_config_async(struct dc *dc,
5550                                 uint32_t link_index,
5551                                 struct set_config_cmd_payload *payload,
5552                                 struct dmub_notification *notify)
5553 {
5554         union dmub_rb_cmd cmd = {0};
5555         bool is_cmd_complete = true;
5556
5557         /* prepare SET_CONFIG command */
5558         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5559         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5560
5561         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5562         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5563         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5564
5565         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5566                 /* command is not processed by dmub */
5567                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5568                 return is_cmd_complete;
5569         }
5570
5571         /* command processed by dmub, if ret_status is 1, it is completed instantly */
5572         if (cmd.set_config_access.header.ret_status == 1)
5573                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5574         else
5575                 /* cmd pending, will receive notification via outbox */
5576                 is_cmd_complete = false;
5577
5578         return is_cmd_complete;
5579 }
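
/*
 * Example (illustrative): submitting a SET_CONFIG and distinguishing an
 * immediate reply from a deferred one. handle_sc_status() is a hypothetical
 * DM helper.
 *
 *      struct set_config_cmd_payload payload = {0};
 *      struct dmub_notification notify = {0};
 *
 *      payload.msg_type = msg_type;
 *      payload.msg_data = msg_data;
 *
 *      if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
 *              handle_sc_status(notify.sc_status);
 *      else
 *              ; // completion arrives later through the outbox notification
 */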
5580
5581 /**
5582  * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5583  *
5584  * @dc: [in] dc structure
5585  * @link_index: [in] link index
5586  * @mst_alloc_slots: [in] mst slots to be allotted
5587  * @mst_slots_in_use: [out] mst slots in use returned in failure case
5588  *
5589  * Submits mst slot allocation command to dmub via inbox message
5590  *
5591  * Return:
5592  * DC_OK if successful, DC_ERROR if failure
5593  */
5594 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5595                                 uint32_t link_index,
5596                                 uint8_t mst_alloc_slots,
5597                                 uint8_t *mst_slots_in_use)
5598 {
5599         union dmub_rb_cmd cmd = {0};
5600
5601         /* prepare MST_ALLOC_SLOTS command */
5602         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5603         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5604
5605         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5606         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5607
5608         if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5609                 /* command is not processed by dmub */
5610                 return DC_ERROR_UNEXPECTED;
5611
5612         /* command processed by dmub, a ret_status other than 1 is an error */
5613         if (cmd.set_mst_alloc_slots.header.ret_status != 1)
5614                 /* command processing error */
5615                 return DC_ERROR_UNEXPECTED;
5616
5617         /* command processed and we have a status of 2, mst not enabled in dpia */
5618         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5619                 return DC_FAIL_UNSUPPORTED_1;
5620
5621         /* previously configured mst alloc and used slots did not match */
5622         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5623                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5624                 return DC_NOT_SUPPORTED;
5625         }
5626
5627         return DC_OK;
5628 }
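
/*
 * Example (illustrative): a caller distinguishing the failure modes
 * documented above.
 *
 *      uint8_t slots_in_use = 0;
 *      enum dc_status status = dc_process_dmub_set_mst_slots(dc, link_index,
 *                      req_slots, &slots_in_use);
 *
 *      if (status == DC_FAIL_UNSUPPORTED_1)
 *              ; // MST not enabled on this DPIA
 *      else if (status == DC_NOT_SUPPORTED)
 *              ; // previous allocation (slots_in_use) still outstanding
 */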
5629
5630 /**
5631  * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5632  *
5633  * @dc: [in] dc structure
5634  * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5635  *
5636  * Submits dpia hpd int enable command to dmub via inbox message
5637  */
5638 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5639                                 uint32_t hpd_int_enable)
5640 {
5641         union dmub_rb_cmd cmd = {0};
5642
5643         cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5644         cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5645
5646         dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5647
5648         DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5649 }
5650
5651 /**
5652  * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5653  *
5654  * @dc: [in] dc structure
5655  *
5656  * Dumps the current DMUB diagnostic data via dc_dmub_srv_log_diagnostic_data().
5657  */
5658 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5659 {
5660         dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5661 }
5662
5663 /**
5664  * dc_disable_accelerated_mode - disable accelerated mode
5665  * @dc: dc structure
5666  */
5667 void dc_disable_accelerated_mode(struct dc *dc)
5668 {
5669         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5670 }
5671
5672
5673 /**
5674  *  dc_notify_vsync_int_state - notifies vsync enable/disable state
5675  *  @dc: dc structure
5676  *  @stream: stream where vsync int state changed
5677  *  @enable: whether vsync is enabled or disabled
5678  *
5679  *  Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
5680  *  interrupts after steady state is reached.
5681  */
5682 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5683 {
5684         int i;
5685         int edp_num;
5686         struct pipe_ctx *pipe = NULL;
5687         struct dc_link *link = stream->sink->link;
5688         struct dc_link *edp_links[MAX_NUM_EDP];
5689
5690
5691         if (link->psr_settings.psr_feature_enabled)
5692                 return;
5693
5694         if (link->replay_settings.replay_feature_enabled)
5695                 return;
5696
5697         /* find primary pipe associated with stream */
5698         for (i = 0; i < MAX_PIPES; i++) {
5699                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5700
5701                 if (pipe->stream == stream && pipe->stream_res.tg)
5702                         break;
5703         }
5704
5705         if (i == MAX_PIPES) {
5706                 ASSERT(0);
5707                 return;
5708         }
5709
5710         dc_get_edp_links(dc, edp_links, &edp_num);
5711
5712         /* Determine panel inst */
5713         for (i = 0; i < edp_num; i++) {
5714                 if (edp_links[i] == link)
5715                         break;
5716         }
5717
5718         if (i == edp_num) {
5719                 return;
5720         }
5721
5722         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5723                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5724 }
5725
5726 /*****************************************************************************
5727  *  dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
5728  *                          ABM
5729  *  @dc: dc structure
5730  *  @stream: stream whose ABM state is to be saved or restored
5731  *  @pData: abm hw states
5732  *
5733  ****************************************************************************/
5734 bool dc_abm_save_restore(
5735                 struct dc *dc,
5736                 struct dc_stream_state *stream,
5737                 struct abm_save_restore *pData)
5738 {
5739         int i;
5740         int edp_num;
5741         struct pipe_ctx *pipe = NULL;
5742         struct dc_link *link = stream->sink->link;
5743         struct dc_link *edp_links[MAX_NUM_EDP];
5744
5745         if (link->replay_settings.replay_feature_enabled)
5746                 return false;
5747
5748         /* find primary pipe associated with stream */
5749         for (i = 0; i < MAX_PIPES; i++) {
5750                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5751
5752                 if (pipe->stream == stream && pipe->stream_res.tg)
5753                         break;
5754         }
5755
5756         if (i == MAX_PIPES) {
5757                 ASSERT(0);
5758                 return false;
5759         }
5760
5761         dc_get_edp_links(dc, edp_links, &edp_num);
5762
5763         /* Determine panel inst */
5764         for (i = 0; i < edp_num; i++)
5765                 if (edp_links[i] == link)
5766                         break;
5767
5768         if (i == edp_num)
5769                 return false;
5770
5771         if (pipe->stream_res.abm &&
5772                 pipe->stream_res.abm->funcs->save_restore)
5773                 return pipe->stream_res.abm->funcs->save_restore(
5774                                 pipe->stream_res.abm,
5775                                 i,
5776                                 pData);
5777         return false;
5778 }
5779
5780 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5781 {
5782         unsigned int i;
5783         bool subvp_sw_cursor_req = false;
5784
5785         for (i = 0; i < dc->current_state->stream_count; i++) {
5786                 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5787                         subvp_sw_cursor_req = true;
5788                         break;
5789                 }
5790         }
5791         properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5792 }
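
/*
 * Example (illustrative): a DM validating a requested cursor size against
 * the SubVP-aware limit reported above.
 *
 *      struct dc_current_properties props = {0};
 *
 *      dc_query_current_properties(dc, &props);
 *      if (cursor_width > props.cursor_size_limit)
 *              ; // fall back to a software/overlay cursor
 */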
5793
5794 /**
5795  * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5796  *
5797  * Called when DM wants to power on/off eDP.
5798  *     Only works on links that have the skip_implict_edp_power_control flag set.
5799  *
5800  * @dc: Current DC state
5801  * @edp_link: a link with eDP connector signal type
5802  * @powerOn: power on/off eDP
5803  *
5804  * Return: void
5805  */
5806 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5807                                  bool powerOn)
5808 {
5809         if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5810                 return;
5811
5812         if (edp_link->skip_implict_edp_power_control == false)
5813                 return;
5814
5815         edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5816 }
5817
5818 /*
5819  *****************************************************************************
5820  * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5821  *
5822  * Called when DM wants to make power policy decisions based on dc_state
5823  *
5824  *****************************************************************************
5825  */
5826 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5827 {
5828         struct dc_power_profile profile = { 0 };
5829
5830         profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
5831
5832         return profile;
5833 }
5834