/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
                                  struct dmub_srv *dmub)
{
        dc_srv->dmub = dmub;
        dc_srv->ctx = dc->ctx;
}

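/*
 * Allocate and construct the DC-side DMUB service wrapper. Returns NULL
 * (after breaking to the debugger) if the allocation fails.
 */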
struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
        struct dc_dmub_srv *dc_srv =
                kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

        if (dc_srv == NULL) {
                BREAK_TO_DEBUGGER();
                return NULL;
        }

        dc_dmub_srv_construct(dc_srv, dc, dmub);

        return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
        if (*dmub_srv) {
                kfree(*dmub_srv);
                *dmub_srv = NULL;
        }
}

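/*
 * Block until the DMUB ring buffer is idle. Each dmub_srv_wait_for_idle()
 * call polls for up to 100 ms; with the disable_timeout debug option set the
 * wait is retried indefinitely, otherwise a timeout is logged together with
 * the DMCUB diagnostic data.
 */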
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status;

        do {
                status = dmub_srv_wait_for_idle(dmub, 100000);
        } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
        }
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status = DMUB_STATUS_OK;

        status = dmub_srv_clear_inbox0_ack(dmub);
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
        }
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status = DMUB_STATUS_OK;

        status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
        }
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
                                 union dmub_inbox0_data_register data)
{
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status = DMUB_STATUS_OK;

        status = dmub_srv_send_inbox0_cmd(dmub, data);
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error sending INBOX0 cmd\n");
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
        }
}

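/*
 * Queue a list of commands into the DMUB ring buffer and start execution.
 * If the ring buffer fills up mid-list, the commands queued so far are
 * executed and drained before the command that did not fit is requeued.
 * D3 power-state rejections return false without logging; all other
 * failures are logged with diagnostic data.
 */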
bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
                unsigned int count,
                union dmub_rb_cmd *cmd_list)
{
        struct dc_context *dc_ctx;
        struct dmub_srv *dmub;
        enum dmub_status status;
        int i;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        dc_ctx = dc_dmub_srv->ctx;
        dmub = dc_dmub_srv->dmub;

        for (i = 0; i < count; i++) {
                // Queue command
                status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

                if (status == DMUB_STATUS_QUEUE_FULL) {
                        /* Execute and wait for queue to become empty again. */
                        status = dmub_srv_cmd_execute(dmub);
                        if (status == DMUB_STATUS_POWER_STATE_D3)
                                return false;

                        do {
                                status = dmub_srv_wait_for_idle(dmub, 100000);
                        } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

                        /* Requeue the command. */
                        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
                }

                if (status != DMUB_STATUS_OK) {
                        if (status != DMUB_STATUS_POWER_STATE_D3) {
                                DC_ERROR("Error queueing DMUB command: status=%d\n", status);
                                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                        }
                        return false;
                }
        }

        status = dmub_srv_cmd_execute(dmub);
        if (status != DMUB_STATUS_OK) {
                if (status != DMUB_STATUS_POWER_STATE_D3) {
                        DC_ERROR("Error starting DMUB execution: status=%d\n", status);
                        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                }
                return false;
        }

        return true;
}

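/*
 * Wait for the DMUB to finish processing the previously executed commands.
 * For DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY the reply payload is copied from the
 * ring buffer back into cmd_list; DM_DMUB_WAIT_TYPE_NO_WAIT returns
 * immediately. The first observed timeout is snapshotted into dmub->debug
 * for later inspection.
 */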
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
                enum dm_dmub_wait_type wait_type,
                union dmub_rb_cmd *cmd_list)
{
        struct dmub_srv *dmub;
        enum dmub_status status;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        dmub = dc_dmub_srv->dmub;

        // Wait for DMUB to process command
        if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
                do {
                        status = dmub_srv_wait_for_idle(dmub, 100000);
                } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

                if (status != DMUB_STATUS_OK) {
                        DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
                        if (!dmub->debug.timeout_occured) {
                                dmub->debug.timeout_occured = true;
                                dmub->debug.timeout_cmd = *cmd_list;
                                dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
                        }
                        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                        return false;
                }

                // Copy data back from ring buffer into command
                if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
                        dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
        }

        return true;
}

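/*
 * Convenience wrapper to queue, execute and (optionally) wait on a single
 * command. A minimal illustrative call sequence (the command fields shown
 * are only an example):
 *
 *      union dmub_rb_cmd cmd = { 0 };
 *
 *      cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 *      cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
 *      if (!dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
 *              // handle timeout or D3 rejection
 */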
bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
        return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
        struct dc_context *dc_ctx;
        struct dmub_srv *dmub;
        enum dmub_status status;
        int i;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        dc_ctx = dc_dmub_srv->ctx;
        dmub = dc_dmub_srv->dmub;

        for (i = 0; i < count; i++) {
                // Queue command
                status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

                if (status == DMUB_STATUS_QUEUE_FULL) {
                        /* Execute and wait for queue to become empty again. */
                        status = dmub_srv_cmd_execute(dmub);
                        if (status == DMUB_STATUS_POWER_STATE_D3)
                                return false;

                        dmub_srv_wait_for_idle(dmub, 100000);

                        /* Requeue the command. */
                        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
                }

                if (status != DMUB_STATUS_OK) {
                        if (status != DMUB_STATUS_POWER_STATE_D3) {
                                DC_ERROR("Error queueing DMUB command: status=%d\n", status);
                                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                        }
                        return false;
                }
        }

        status = dmub_srv_cmd_execute(dmub);
        if (status != DMUB_STATUS_OK) {
                if (status != DMUB_STATUS_POWER_STATE_D3) {
                        DC_ERROR("Error starting DMUB execution: status=%d\n", status);
                        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                }
                return false;
        }

        // Wait for DMUB to process command
        if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
                if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
                        do {
                                status = dmub_srv_wait_for_idle(dmub, 100000);
                        } while (status != DMUB_STATUS_OK);
                } else
                        status = dmub_srv_wait_for_idle(dmub, 100000);

                if (status != DMUB_STATUS_OK) {
                        DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
                        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
                        return false;
                }

                // Copy data back from ring buffer into command
                if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
                        dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
        }

        return true;
}

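/*
 * Query the firmware boot status register and report whether the
 * optimized-init-done bit is set. A failed query is treated the same as the
 * bit being clear.
 */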
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_srv *dmub;
        struct dc_context *dc_ctx;
        union dmub_fw_boot_status boot_status;
        enum dmub_status status;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        dmub = dc_dmub_srv->dmub;
        dc_ctx = dc_dmub_srv->ctx;

        status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
                return false;
        }

        return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
                                    unsigned int stream_mask)
{
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
                                         stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_srv *dmub;
        struct dc_context *dc_ctx;
        union dmub_fw_boot_status boot_status;
        enum dmub_status status;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;

        dmub = dc_dmub_srv->dmub;
        dc_ctx = dc_dmub_srv->ctx;

        status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
                return false;
        }

        return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
        struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

        return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
        dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

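/*
 * Build and send a FAMS DRR update to the DMCUB. Note the payload_bytes
 * convention used for most commands in this file: the payload size counts
 * only the bytes that follow the command header.
 */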
void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
        union dmub_rb_cmd cmd = { 0 };

        cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
        cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
        cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
        cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
        cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

        // Send the command to the DMCUB.
        dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
        union dmub_rb_cmd cmd = { 0 };

        cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
        cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
        cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

        // Send the command to the DMCUB.
        dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

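/*
 * Despite the plural name, this returns the index of the last pipe in the
 * current state whose timing generator is assigned to the given stream.
 */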
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
        uint8_t pipes = 0;
        int i = 0;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg)
                        pipes = i;
        }
        return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
                struct pipe_ctx *head_pipe,
                struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
        int j;
        int pipe_idx = 0;

        fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

                if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
                        fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
                }
        }
        fams_pipe_data->pipe_count = pipe_idx;
}

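/*
 * Delegate P-State (MCLK switch) management to the DMCUB firmware, or take
 * it back. When enabling, per-stream pipe data is packed into the command so
 * the firmware can construct its microschedule; a stream not using FAMS puts
 * the config into the FPO + VActive case and assigns the vactive stretch
 * margin.
 */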
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
        union dmub_rb_cmd cmd = { 0 };
        struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
        int i = 0, k = 0;
        int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
        uint8_t visual_confirm_enabled;
        int pipe_idx = 0;

        if (dc == NULL)
                return false;

        visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

        // Format command.
        cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
        cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
        cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
        cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

        if (should_manage_pstate) {
                for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                        if (!pipe->stream)
                                continue;

                        /* If FAMS is being used to support P-State and there is a stream
                         * that does not use FAMS, we are in an FPO + VActive scenario.
                         * Assign vactive stretch margin in this case.
                         */
                        if (!pipe->stream->fpo_in_use) {
                                cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
                                break;
                        }
                        pipe_idx++;
                }
        }

        for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
                        uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

                        config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
                        config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
                        config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
                        config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
                        dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
                        k++;
                }
        }
        cmd.fw_assisted_mclk_switch.header.payload_bytes =
                sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

        // Send the command to the DMCUB.
        dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

        return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
        union dmub_rb_cmd cmd = { 0 };

        if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
                return;

        /* Prepare fw command */
        cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
        cmd.query_feature_caps.header.sub_type = 0;
        cmd.query_feature_caps.header.ret_status = 1;
        cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

        /* If command was processed, copy feature caps to dmub srv */
        if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
            cmd.query_feature_caps.header.ret_status == 0) {
                memcpy(&dc_dmub_srv->dmub->feature_caps,
                       &cmd.query_feature_caps.query_feature_caps_data,
                       sizeof(struct dmub_feature_caps));
        }
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
        union dmub_rb_cmd cmd = { 0 };
        unsigned int panel_inst = 0;

        dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);

        // Prepare fw command
        cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
        cmd.visual_confirm_color.header.sub_type = 0;
        cmd.visual_confirm_color.header.ret_status = 1;
        cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
        cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

        // If command was processed, copy the visual confirm color to dmub srv
        if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
                cmd.visual_confirm_color.header.ret_status == 0) {
                memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
                        &cmd.visual_confirm_color.visual_confirm_color_data,
                        sizeof(struct dmub_visual_confirm_color));
        }
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @context: [in] DC state for access to phantom stream
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [out] Pipe data which stores the VBLANK/DRR info
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
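 *
 * As an illustrative, hypothetical example (assuming SUBVP_DRR_MARGIN_US is
 * 100 us): for a DRR timing with h_total = 2200, v_total = 1125 and
 * pix_clk_100hz = 2970000 (297 MHz), drr_frame_us evaluates to
 * 1125 * 2200 * 1000000 / 297000000 ~= 8333 us. With a 500 us MALL region,
 * min_drr_supported_us ~= 8933 us, which maps back to min_vtotal_supported =
 * 297000000 * 8933 / (2200 * 1000000) ~= 1205 lines.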
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
                struct dc_state *context,
                struct pipe_ctx *subvp_pipe,
                struct pipe_ctx *vblank_pipe,
                struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
        struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
        struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
        struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
        uint16_t drr_frame_us = 0;
        uint16_t min_drr_supported_us = 0;
        uint16_t max_drr_supported_us = 0;
        uint16_t max_drr_vblank_us = 0;
        uint16_t max_drr_mallregion_us = 0;
        uint16_t mall_region_us = 0;
        uint16_t prefetch_us = 0;
        uint16_t subvp_active_us = 0;
        uint16_t drr_active_us = 0;
        uint16_t min_vtotal_supported = 0;
        uint16_t max_vtotal_supported = 0;

        pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
        pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
        pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

        drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
                        (((uint64_t)drr_timing->pix_clk_100hz * 100)));
        // P-State allow width and FW delays are already included in phantom_timing->v_addressable
        mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
                        (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
        min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
        min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
                        (((uint64_t)drr_timing->h_total * 1000000)));

        prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
                        (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
        subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
                        (((uint64_t)main_timing->pix_clk_100hz * 100)));
        drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
                        (((uint64_t)drr_timing->pix_clk_100hz * 100)));
        max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
                        dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
        max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
        max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
        max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
                        (((uint64_t)drr_timing->h_total * 1000000)));

        /* When calculating the max vtotal supported for SubVP + DRR cases, add
         * margin due to possible rounding errors (being off by 1 line in the
         * FW calculation can incorrectly push the P-State switch to wait 1 frame
         * longer).
         */
        max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

        pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
        pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
        pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [out] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for the SubVP + VBLANK case is
 * stored in the pipe_data (subvp_data and vblank_data). Also check if the
 * VBLANK pipe is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
                struct dc_state *context,
                union dmub_rb_cmd *cmd,
                struct pipe_ctx *vblank_pipe,
                uint8_t cmd_pipe_index)
{
        uint32_t i;
        struct pipe_ctx *pipe = NULL;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
                        &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

        // Find the SubVP pipe
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];

                // We check for master pipe, but it shouldn't matter since we only need
                // the pipe for timing info (stream should be same for any pipe splits)
                if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
                                !resource_is_pipe_type(pipe, DPP_PIPE))
                        continue;

                // Find the SubVP pipe
                if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                        break;
        }

        pipe_data->mode = VBLANK;
        pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
        pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
                                                        vblank_pipe->stream->timing.v_front_porch;
        pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
        pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
        pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
        pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
        pipe_data->pipe_config.vblank_data.vblank_end =
                        vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

        if (vblank_pipe->stream->ignore_msa_timing_param &&
                (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
                populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for the SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [out] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
                struct dc_state *context,
                union dmub_rb_cmd *cmd,
                struct pipe_ctx *subvp_pipes[])
{
        uint32_t subvp0_prefetch_us = 0;
        uint32_t subvp1_prefetch_us = 0;
        uint32_t prefetch_delta_us = 0;
        struct dc_stream_state *phantom_stream0 = NULL;
        struct dc_stream_state *phantom_stream1 = NULL;
        struct dc_crtc_timing *phantom_timing0 = NULL;
        struct dc_crtc_timing *phantom_timing1 = NULL;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

        phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
        phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
        phantom_timing0 = &phantom_stream0->timing;
        phantom_timing1 = &phantom_stream1->timing;

        subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
                        (uint64_t)phantom_timing0->h_total * 1000000),
                        (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
        subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
                        (uint64_t)phantom_timing1->h_total * 1000000),
                        (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

        // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
        // should increase its prefetch time to match the other
        if (subvp0_prefetch_us > subvp1_prefetch_us) {
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
                prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
                                div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
                                        ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
                                        ((uint64_t)phantom_timing1->h_total * 1000000));
        } else if (subvp1_prefetch_us > subvp0_prefetch_us) {
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
                prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
                                div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
                                        ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
                                        ((uint64_t)phantom_timing0->h_total * 1000000));
        }
}

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [out] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
                struct dc_state *context,
                union dmub_rb_cmd *cmd,
                struct pipe_ctx *subvp_pipe,
                uint8_t cmd_pipe_index)
{
        uint32_t j;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
                        &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
        struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
        struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
        uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

        pipe_data->mode = SUBVP;
        pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
        pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
        pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
        pipe_data->pipe_config.subvp_data.main_vblank_start =
                        main_timing->v_total - main_timing->v_front_porch;
        pipe_data->pipe_config.subvp_data.main_vblank_end =
                        main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
        pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
        pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
        pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
                (subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

        /* Calculate the scaling factor from the src and dst height.
         * e.g. If 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
         * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
         *
         * Make sure to combine stream and plane scaling together.
         */
        reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
                        &out_num_stream, &out_den_stream);
        reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
                        &out_num_plane, &out_den_plane);
        reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
        pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
        pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

        // Prefetch lines is equal to VACTIVE + BP + VSYNC
        pipe_data->pipe_config.subvp_data.prefetch_lines =
                        phantom_timing->v_total - phantom_timing->v_front_porch;

        // Round up
        pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
                        div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
                                        ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
        pipe_data->pipe_config.subvp_data.processing_delay_lines =
                        div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
                                        ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

        if (subvp_pipe->bottom_pipe) {
                pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
        } else if (subvp_pipe->next_odm_pipe) {
                pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
        } else {
                pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
        }

        // Find phantom pipe index based on phantom stream
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

                if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
                                phantom_pipe->stream == phantom_stream) {
                        pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
                        if (phantom_pipe->bottom_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
                        } else if (phantom_pipe->next_odm_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
                        } else {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
                        }
                        break;
                }
        }
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true, enables the pipes population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
                struct dc_state *context,
                bool enable)
{
        uint8_t cmd_pipe_index = 0;
        uint32_t i, pipe_idx;
        uint8_t subvp_count = 0;
        union dmub_rb_cmd cmd;
        struct pipe_ctx *subvp_pipes[2];
        uint32_t wm_val_refclk = 0;
        enum mall_stream_type pipe_mall_type;

        memset(&cmd, 0, sizeof(cmd));
        // FW command for SUBVP
        cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
        cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
        cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
                        sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                /* For the SubVP pipe count, only count the top most (ODM / MPC) pipe */
                if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                resource_is_pipe_type(pipe, DPP_PIPE) &&
                                dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                        subvp_pipes[subvp_count++] = pipe;
        }

        if (enable) {
                // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
                for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                        pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

                        if (!pipe->stream)
                                continue;

                        /* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
                         * Any ODM or MPC splits being used in SubVP will be handled internally in
                         * populate_subvp_cmd_pipe_info
                         */
                        if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                        resource_is_pipe_type(pipe, DPP_PIPE) &&
                                        pipe_mall_type == SUBVP_MAIN) {
                                populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
                        } else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                        resource_is_pipe_type(pipe, DPP_PIPE) &&
                                        pipe_mall_type == SUBVP_NONE) {
                                // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
                                // we run through DML without calculating "natural" P-state support
                                populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
                        }
                        pipe_idx++;
                }
                if (subvp_count == 2)
                        update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);

                cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
                cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

                // Store the original watermark value for this SubVP config so we can lower it when the
                // MCLK switch starts
                wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
                                (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

                cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
        }

        dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
        if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
                return false;

        return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
        struct dmub_diagnostic_data diag_data = {0};
        uint32_t i;

        if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
                DC_LOG_ERROR("%s: invalid parameters.", __func__);
                return;
        }

        DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

        if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
                DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
                return;
        }

        DC_LOG_DEBUG("DMCUB STATE:");
        DC_LOG_DEBUG("    dmcub_version      : %08x", diag_data.dmcub_version);
        DC_LOG_DEBUG("    scratch  [0]       : %08x", diag_data.scratch[0]);
        DC_LOG_DEBUG("    scratch  [1]       : %08x", diag_data.scratch[1]);
        DC_LOG_DEBUG("    scratch  [2]       : %08x", diag_data.scratch[2]);
        DC_LOG_DEBUG("    scratch  [3]       : %08x", diag_data.scratch[3]);
        DC_LOG_DEBUG("    scratch  [4]       : %08x", diag_data.scratch[4]);
        DC_LOG_DEBUG("    scratch  [5]       : %08x", diag_data.scratch[5]);
        DC_LOG_DEBUG("    scratch  [6]       : %08x", diag_data.scratch[6]);
        DC_LOG_DEBUG("    scratch  [7]       : %08x", diag_data.scratch[7]);
        DC_LOG_DEBUG("    scratch  [8]       : %08x", diag_data.scratch[8]);
        DC_LOG_DEBUG("    scratch  [9]       : %08x", diag_data.scratch[9]);
        DC_LOG_DEBUG("    scratch [10]       : %08x", diag_data.scratch[10]);
        DC_LOG_DEBUG("    scratch [11]       : %08x", diag_data.scratch[11]);
        DC_LOG_DEBUG("    scratch [12]       : %08x", diag_data.scratch[12]);
        DC_LOG_DEBUG("    scratch [13]       : %08x", diag_data.scratch[13]);
        DC_LOG_DEBUG("    scratch [14]       : %08x", diag_data.scratch[14]);
        DC_LOG_DEBUG("    scratch [15]       : %08x", diag_data.scratch[15]);
        for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
                DC_LOG_DEBUG("    pc[%d]             : %08x", i, diag_data.pc[i]);
        DC_LOG_DEBUG("    unk_fault_addr     : %08x", diag_data.undefined_address_fault_addr);
        DC_LOG_DEBUG("    inst_fault_addr    : %08x", diag_data.inst_fetch_fault_addr);
        DC_LOG_DEBUG("    data_fault_addr    : %08x", diag_data.data_write_fault_addr);
        DC_LOG_DEBUG("    inbox1_rptr        : %08x", diag_data.inbox1_rptr);
        DC_LOG_DEBUG("    inbox1_wptr        : %08x", diag_data.inbox1_wptr);
        DC_LOG_DEBUG("    inbox1_size        : %08x", diag_data.inbox1_size);
        DC_LOG_DEBUG("    inbox0_rptr        : %08x", diag_data.inbox0_rptr);
        DC_LOG_DEBUG("    inbox0_wptr        : %08x", diag_data.inbox0_wptr);
        DC_LOG_DEBUG("    inbox0_size        : %08x", diag_data.inbox0_size);
        DC_LOG_DEBUG("    is_enabled         : %d", diag_data.is_dmcub_enabled);
        DC_LOG_DEBUG("    is_soft_reset      : %d", diag_data.is_dmcub_soft_reset);
        DC_LOG_DEBUG("    is_secure_reset    : %d", diag_data.is_dmcub_secure_reset);
        DC_LOG_DEBUG("    is_traceport_en    : %d", diag_data.is_traceport_en);
        DC_LOG_DEBUG("    is_cw0_en          : %d", diag_data.is_cw0_enabled);
        DC_LOG_DEBUG("    is_cw6_en          : %d", diag_data.is_cw6_enabled);
}

static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
        struct pipe_ctx *test_pipe, *split_pipe;
        const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
        struct rect r1 = scl_data->recout, r2, r2_half;
        int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
        int cur_layer = pipe_ctx->plane_state->layer_index;

        /*
         * Disable the cursor if there's another pipe above this with a
         * plane that contains this pipe's viewport to prevent double cursor
         * and incorrect scaling artifacts.
         */
        for (test_pipe = pipe_ctx->top_pipe; test_pipe;
             test_pipe = test_pipe->top_pipe) {
                // Skip invisible layer and pipe-split plane on same layer
                if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
                        continue;

                r2 = test_pipe->plane_res.scl_data.recout;
                r2_r = r2.x + r2.width;
                r2_b = r2.y + r2.height;
                split_pipe = test_pipe;

                /*
                 * There is another half plane on same layer because of
                 * pipe-split, merge together per same height.
                 */
                for (split_pipe = pipe_ctx->top_pipe; split_pipe;
                     split_pipe = split_pipe->top_pipe)
                        if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
                                r2_half = split_pipe->plane_res.scl_data.recout;
                                r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
                                r2.width = r2.width + r2_half.width;
                                r2_r = r2.x + r2.width;
                                break;
                        }

                if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
                        return true;
        }

        return false;
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
        if (pipe_ctx->plane_state != NULL) {
                if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
                        return false;

                if (dc_can_pipe_disable_cursor(pipe_ctx))
                        return false;
        }

        if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
                pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
                pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
                return true;

        if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
                return true;

        return false;
}

static void dc_build_cursor_update_payload0(
                struct pipe_ctx *pipe_ctx, uint8_t p_idx,
                struct dmub_cmd_update_cursor_payload0 *payload)
{
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
        unsigned int panel_inst = 0;

        if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
                pipe_ctx->stream->link, &panel_inst))
                return;

        /* Payload: Cursor Rect is built from position & attribute
         * x & y are obtained from position
         */
        payload->cursor_rect.x = hubp->cur_rect.x;
        payload->cursor_rect.y = hubp->cur_rect.y;
        /* w & h are obtained from attribute */
        payload->cursor_rect.width  = hubp->cur_rect.w;
        payload->cursor_rect.height = hubp->cur_rect.h;

        payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
        payload->pipe_idx    = p_idx;
        payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
        payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
                struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
                const struct hubp *hubp, const struct dpp *dpp)
{
        /* Hubp */
        pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
        pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
        pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
        pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

        /* dpp */
        pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
        pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
                struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
                const struct hubp *hubp, const struct dpp *dpp)
{
        /* Hubp */
        pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
        pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
        pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
        pl_A->aHubp.size.raw     = hubp->att.size.raw;
        pl_A->aHubp.settings.raw = hubp->att.settings.raw;

        /* dpp */
        pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

1087 /**
1088  * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
1089  *
1090  * @pCtx: [in] pipe context
1091  * @pipe_idx: [in] pipe index
1092  *
1093  * This function would store the cursor related information and pass it into
1094  * dmub
1095  */
1096 void dc_send_update_cursor_info_to_dmu(
1097                 struct pipe_ctx *pCtx, uint8_t pipe_idx)
1098 {
1099         union dmub_rb_cmd cmd[2];
1100         union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
1101                                         &cmd[0].update_cursor_info.update_cursor_info_data;
1102
1103         memset(cmd, 0, sizeof(cmd));
1104
1105         if (!dc_dmub_should_update_cursor_data(pCtx))
1106                 return;
1107         /*
1108          * Since we use multi_cmd_pending for dmub command, the 2nd command is
1109          * only assigned to store cursor attributes info.
1110          * 1st command can view as 2 parts, 1st is for PSR/Replay data, the other
1111          * is to store cursor position info.
1112          *
1113          * Command heaer type must be the same type if using  multi_cmd_pending.
1114          * Besides, while process 2nd command in DMU, the sub type is useless.
1115          * So it's meanless to pass the sub type header with different type.
1116          */
1117
1118         {
1119                 /* Build Payload#0 Header */
1120                 cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
1121                 cmd[0].update_cursor_info.header.payload_bytes =
1122                                 sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
1123                 cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd
1124
1125                 /* Prepare Payload */
1126                 dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);
1127
1128                 dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
1129                                 pCtx->plane_res.hubp, pCtx->plane_res.dpp);
1130         }
1131         {
1132                 /* Build Payload#1 Header */
1133                 cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
1134                 cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
1135                 cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.
1136
1137                 dc_build_cursor_attribute_update_payload1(
1138                                 &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
1139                                 pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
1140
1141                 /* Send the combined update_cursor_info cmds to the DMU */
1142                 dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
1143         }
1144 }
1145
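/*
 * If the firmware does not implement the is_psrsu_supported hook, assume the
 * minimum version check passes; otherwise defer to the per-ASIC hook.
 */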
1146 bool dc_dmub_check_min_version(struct dmub_srv *srv)
1147 {
1148         if (!srv->hw_funcs.is_psrsu_supported)
1149                 return true;
1150         return srv->hw_funcs.is_psrsu_supported(srv);
1151 }
1152
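/*
 * Enabling DPIA tracing takes two GPINT transactions: the first writes trace
 * buffer mask word1 (0x0010, presumably the DPIA trace bit), the second
 * tells the firmware to apply the updated mask.
 */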
1153 void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
1154 {
1155         struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1156
1157         if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
1158                 DC_LOG_ERROR("%s: invalid parameters.", __func__);
1159                 return;
1160         }
1161
1162         if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
1163                                        0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
1164                 DC_LOG_ERROR("timeout updating trace buffer mask word\n");
1165                 return;
1166         }
1167
1168         if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
1169                                        0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
1170                 DC_LOG_ERROR("timeout updating trace buffer mask word\n");
1171                 return;
1172         }
1173
1174         DC_LOG_DEBUG("Enabled DPIA trace\n");
1175 }
1176
1177 void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
1178 {
1179         dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
1180 }
1181
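/*
 * Returns true when the DMCUB hardware is powered up (or when there is no
 * real DMCUB to query). With @wait set, this blocks: if timeouts are
 * disabled in the debug options it retries indefinitely, otherwise it gives
 * up after one 500ms poll window and logs an error.
 */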
1182 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
1183 {
1184         struct dc_context *dc_ctx;
1185         enum dmub_status status;
1186
1187         if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1188                 return true;
1189
1190         if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
1191                 return true;
1192
1193         dc_ctx = dc_dmub_srv->ctx;
1194
1195         if (wait) {
1196                 if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
1197                         do {
1198                                 status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
1199                         } while (status != DMUB_STATUS_OK);
1200                 } else {
1201                         status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
1202                         if (status != DMUB_STATUS_OK) {
1203                                 DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
1204                                 return false;
1205                         }
1206                 }
1207         } else
1208                 return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
1209
1210         return true;
1211 }
1212
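/* Count the streams in the current state that are not DPMS-off. */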
1213 static int count_active_streams(const struct dc *dc)
1214 {
1215         int i, count = 0;
1216
1217         for (i = 0; i < dc->current_state->stream_count; ++i) {
1218                 struct dc_stream_state *stream = dc->current_state->streams[i];
1219
1220                 if (stream && !stream->dpms_off)
1221                         count += 1;
1222         }
1223
1224         return count;
1225 }
1226
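/*
 * Notify DMCUB of the driver idle state. On allow, first wait for the
 * command queue to drain, then publish the permitted power features to the
 * shared-state area: the deeper the disable_ips config permits, the more of
 * allow_pg/allow_ips1/allow_ips2/allow_z10 get set.
 */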
1227 static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
1228 {
1229         volatile const struct dmub_shared_state_ips_fw *ips_fw;
1230         struct dc_dmub_srv *dc_dmub_srv;
1231         union dmub_rb_cmd cmd = {0};
1232
1233         if (dc->debug.dmcub_emulation)
1234                 return;
1235
1236         if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1237                 return;
1238
1239         dc_dmub_srv = dc->ctx->dmub_srv;
1240         ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1241
1242         memset(&cmd, 0, sizeof(cmd));
1243         cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
1244         cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
1245         cmd.idle_opt_notify_idle.header.payload_bytes =
1246                 sizeof(cmd.idle_opt_notify_idle) -
1247                 sizeof(cmd.idle_opt_notify_idle.header);
1248
1249         cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
1250
1251         if (allow_idle) {
1252                 volatile struct dmub_shared_state_ips_driver *ips_driver =
1253                         &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1254                 union dmub_shared_state_ips_driver_signals new_signals;
1255
1256                 DC_LOG_IPS(
1257                         "%s wait idle (ips1_commit=%d ips2_commit=%d)",
1258                         __func__,
1259                         ips_fw->signals.bits.ips1_commit,
1260                         ips_fw->signals.bits.ips2_commit);
1261
1262                 dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
1263
1264                 memset(&new_signals, 0, sizeof(new_signals));
1265
1266                 if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
1267                     dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
1268                         new_signals.bits.allow_pg = 1;
1269                         new_signals.bits.allow_ips1 = 1;
1270                         new_signals.bits.allow_ips2 = 1;
1271                         new_signals.bits.allow_z10 = 1;
1272                 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
1273                         new_signals.bits.allow_ips1 = 1;
1274                 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
1275                         new_signals.bits.allow_pg = 1;
1276                         new_signals.bits.allow_ips1 = 1;
1277                 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
1278                         new_signals.bits.allow_pg = 1;
1279                         new_signals.bits.allow_ips1 = 1;
1280                         new_signals.bits.allow_ips2 = 1;
1281                 } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
1282                         /* TODO: Move this logic out to hwseq */
1283                         if (count_active_streams(dc) == 0) {
1284                                 /* IPS2 - Display off */
1285                                 new_signals.bits.allow_pg = 1;
1286                                 new_signals.bits.allow_ips1 = 1;
1287                                 new_signals.bits.allow_ips2 = 1;
1288                                 new_signals.bits.allow_z10 = 1;
1289                         } else {
1290                                 /* RCG only */
1291                                 new_signals.bits.allow_pg = 0;
1292                                 new_signals.bits.allow_ips1 = 1;
1293                                 new_signals.bits.allow_ips2 = 0;
1294                                 new_signals.bits.allow_z10 = 0;
1295                         }
1296                 }
1297
1298                 ips_driver->signals = new_signals;
1299         }
1300
1301         DC_LOG_IPS(
1302                 "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
1303                 __func__,
1304                 allow_idle,
1305                 ips_fw->signals.bits.ips1_commit,
1306                 ips_fw->signals.bits.ips2_commit);
1307
1308         /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
1309         /* We also do not perform a wait since DMCUB could enter idle after the notification. */
1310         dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
1311
1312         /* Register access should stop at this point. */
1313         if (allow_idle)
1314                 dc_dmub_srv->needs_idle_wake = true;
1315 }
1316
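/*
 * Bring DMCUB out of IPS. The handshake is: clear the driver allow signals,
 * give firmware time to settle, ask PMFW to exit the low power state (twice,
 * around the worst-case IPS2 entry window), spin until firmware drops its
 * ips2_commit, wait for hardware power-up, then resync inbox1, whose
 * pointers the firmware may have lost. Finally, notify the firmware that
 * the driver is no longer idle and wait for any IPS1 commit to clear.
 */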
1317 static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
1318 {
1319         struct dc_dmub_srv *dc_dmub_srv;
1320         uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
1321
1322         if (dc->debug.dmcub_emulation)
1323                 return;
1324
1325         if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1326                 return;
1327
1328         dc_dmub_srv = dc->ctx->dmub_srv;
1329
1330         if (dc->clk_mgr->funcs->exit_low_power_state) {
1331                 volatile const struct dmub_shared_state_ips_fw *ips_fw =
1332                         &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1333                 volatile struct dmub_shared_state_ips_driver *ips_driver =
1334                         &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1335                 union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
1336
1337                 rcg_exit_count = ips_fw->rcg_exit_count;
1338                 ips1_exit_count = ips_fw->ips1_exit_count;
1339                 ips2_exit_count = ips_fw->ips2_exit_count;
1340
1341                 ips_driver->signals.all = 0;
1342
1343                 DC_LOG_IPS(
1344                         "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
1345                         __func__,
1346                         ips_driver->signals.bits.allow_ips1,
1347                         ips_driver->signals.bits.allow_ips2,
1348                         ips_fw->signals.bits.ips1_commit,
1349                         ips_fw->signals.bits.ips2_commit,
1350                         ips_fw->rcg_entry_count,
1351                         ips_fw->ips1_entry_count,
1352                         ips_fw->ips2_entry_count);
1353
1354                 /* Note: register access has technically not resumed for DCN here, but we
1355                  * need to message PMFW through our standard register interface.
1356                  */
1357                 dc_dmub_srv->needs_idle_wake = false;
1358
1359                 if (prev_driver_signals.bits.allow_ips2 &&
1360                     (!dc->debug.optimize_ips_handshake ||
1361                      ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
1362                         DC_LOG_IPS(
1363                                 "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
1364                                 ips_fw->signals.bits.ips1_commit,
1365                                 ips_fw->signals.bits.ips2_commit);
1366
1367                         if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
1368                                 udelay(dc->debug.ips2_eval_delay_us);
1369
1370                         if (ips_fw->signals.bits.ips2_commit) {
1371                                 DC_LOG_IPS(
1372                                         "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
1373                                         ips_fw->signals.bits.ips1_commit,
1374                                         ips_fw->signals.bits.ips2_commit);
1375
1376                                 // Tell PMFW to exit low power state
1377                                 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1378
1379                                 DC_LOG_IPS(
1380                                         "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
1381                                         ips_fw->signals.bits.ips1_commit,
1382                                         ips_fw->signals.bits.ips2_commit);
1383
1384                                 // Wait for IPS2 entry upper bound
1385                                 udelay(dc->debug.ips2_entry_delay_us);
1386
1387                                 DC_LOG_IPS(
1388                                         "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
1389                                         ips_fw->signals.bits.ips1_commit,
1390                                         ips_fw->signals.bits.ips2_commit);
1391
1392                                 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1393
1394                                 DC_LOG_IPS(
1395                                         "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
1396                                         ips_fw->signals.bits.ips1_commit,
1397                                         ips_fw->signals.bits.ips2_commit);
1398
1399                                 while (ips_fw->signals.bits.ips2_commit)
1400                                         udelay(1);
1401
1402                                 DC_LOG_IPS(
1403                                         "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
1404                                         ips_fw->signals.bits.ips1_commit,
1405                                         ips_fw->signals.bits.ips2_commit);
1406
1407                                 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1408                                         ASSERT(0);
1409
1410                                 DC_LOG_IPS(
1411                                         "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
1412                                         ips_fw->signals.bits.ips1_commit,
1413                                         ips_fw->signals.bits.ips2_commit);
1414
1415                                 dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
1416                         }
1417                 }
1418
1419                 dc_dmub_srv_notify_idle(dc, false);
1420                 if (prev_driver_signals.bits.allow_ips1) {
1421                         DC_LOG_IPS(
1422                                 "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
1423                                 ips_fw->signals.bits.ips1_commit,
1424                                 ips_fw->signals.bits.ips2_commit);
1425
1426                         while (ips_fw->signals.bits.ips1_commit)
1427                                 udelay(1);
1428
1429                         DC_LOG_IPS(
1430                                 "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
1431                                 ips_fw->signals.bits.ips1_commit,
1432                                 ips_fw->signals.bits.ips2_commit);
1433                 }
1434         }
1435
1436         if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1437                 ASSERT(0);
1438
1439         DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
1440                 __func__,
1441                 rcg_exit_count,
1442                 ips1_exit_count,
1443                 ips2_exit_count);
1444 }
1445
1446 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
1447 {
1448         struct dmub_srv *dmub;
1449
1450         if (!dc_dmub_srv)
1451                 return;
1452
1453         dmub = dc_dmub_srv->dmub;
1454
1455         if (powerState == DC_ACPI_CM_POWER_STATE_D0)
1456                 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
1457         else
1458                 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
1459 }
1460
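/*
 * Single entry point for toggling idle optimizations; tracks idle_allowed so
 * redundant transitions are dropped, and counts nested exits via
 * idle_exit_counter.
 */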
1461 void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
1462 {
1463         struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1464
1465         if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1466                 return;
1467
1468         if (dc_dmub_srv->idle_allowed == allow_idle)
1469                 return;
1470
1471         DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
1472
1473         /*
1474          * Entering a low power state requires a driver notification.
1475          * Powering up the hardware requires notifying PMFW and DMCUB.
1476          * Clearing the driver idle allow requires a DMCUB command.
1477          * DMCUB commands requires the DMCUB to be powered up and restored.
1478          */
1479
1480         if (!allow_idle) {
1481                 dc_dmub_srv->idle_exit_counter += 1;
1482
1483                 dc_dmub_srv_exit_low_power_state(dc);
1484                 /*
1485                  * Idle is considered fully exited only after the sequence above
1486                  * fully completes. If we have a race of two threads exiting
1487                  * at the same time then it's safe to perform the sequence
1488                  * twice as long as we're not re-entering.
1489                  *
1490                  * Infinite command submission is avoided by using the
1491                  * dm_execute_dmub_cmd submission instead of the "wake" helpers.
1492                  */
1493                 dc_dmub_srv->idle_allowed = false;
1494
1495                 dc_dmub_srv->idle_exit_counter -= 1;
1496                 if (dc_dmub_srv->idle_exit_counter < 0) {
1497                         ASSERT(0);
1498                         dc_dmub_srv->idle_exit_counter = 0;
1499                 }
1500         } else {
1501                 /* Consider idle as notified prior to the actual submission to
1502                  * prevent multiple entries. */
1503                 dc_dmub_srv->idle_allowed = true;
1504
1505                 dc_dmub_srv_notify_idle(dc, allow_idle);
1506         }
1507 }
1508
1509 bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
1510                                   enum dm_dmub_wait_type wait_type)
1511 {
1512         return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
1513 }
1514
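/*
 * "Wake" variant of command submission: if idle optimizations are currently
 * allowed, exit idle first, submit the command list, then re-allow idle once
 * no other exit is in flight (idle_exit_counter == 0).
 */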
1515 bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
1516                                        union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
1517 {
1518         struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1519         bool result = false, reallow_idle = false;
1520
1521         if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1522                 return false;
1523
1524         if (count == 0)
1525                 return true;
1526
1527         if (dc_dmub_srv->idle_allowed) {
1528                 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1529                 reallow_idle = true;
1530         }
1531
1532         /*
1533          * These may have different implementations in DM, so ensure
1534          * that we guide it to the expected helper.
1535          */
1536         if (count > 1)
1537                 result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
1538         else
1539                 result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
1540
1541         if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1542             !ctx->dc->debug.disable_dmub_reallow_idle)
1543                 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1544
1545         return result;
1546 }
1547
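/*
 * Raw GPINT submission. A NO_WAIT call uses a zero timeout, so a TIMEOUT
 * status is treated as success there; any other wait type gets a 30us
 * window, and the response register is only read back for WAIT_WITH_REPLY.
 */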
1548 static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1549                                   uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1550 {
1551         struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1552         const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
1553         enum dmub_status status;
1554
1555         if (response)
1556                 *response = 0;
1557
1558         if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1559                 return false;
1560
1561         status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
1562         if (status != DMUB_STATUS_OK) {
1563                 if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
1564                         return true;
1565
1566                 return false;
1567         }
1568
1569         if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
1570                 dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
1571
1572         return true;
1573 }
1574
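/*
 * Idle-safe wrapper around dc_dmub_execute_gpint(). A minimal usage sketch
 * (hypothetical caller; panel instance 0 is assumed here):
 *
 *     uint32_t residency = 0;
 *
 *     if (dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__PSR_RESIDENCY, 0,
 *                                   &residency,
 *                                   DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *             DC_LOG_DEBUG("PSR residency: %u\n", residency);
 */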
1575 bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1576                                uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1577 {
1578         struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1579         bool result = false, reallow_idle = false;
1580
1581         if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1582                 return false;
1583
1584         if (dc_dmub_srv->idle_allowed) {
1585                 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1586                 reallow_idle = true;
1587         }
1588
1589         result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
1590
1591         if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1592             !ctx->dc->debug.disable_dmub_reallow_idle)
1593                 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1594
1595         return result;
1596 }
1597