/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter, or 0 if the CRTC index is out of
 * range or no stream is attached to the CRTC.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc = NULL;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (!acrtc->dm_irq_params.stream) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

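/* Read the current scanout position and vblank interval of a CRTC. The
 * values are packed back into the legacy two-halfword register format
 * that the base driver still expects.
 */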
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        u32 v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc = NULL;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (!acrtc->dm_irq_params.stream) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

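/* Look up the amdgpu_crtc driven by the given output timing generator (OTG)
 * instance. Warns and falls back to the first CRTC when @otg_inst is -1.
 */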
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

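/* A DC timing adjustment is needed when entering fixed-rate VRR or whenever
 * the VRR active state differs between the old and new CRTC state.
 */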
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
                return true;
        else
                return false;
}

static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
                                        int planes_count)
{
        int i, j;

        for (i = 0, j = planes_count - 1; i < j; i++, j--)
                swap(array_of_surface_update[i], array_of_surface_update[j]);
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via the
 * dc_update_planes_and_stream() function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for dc_update_planes_and_stream() that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: number of planes to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 * Return: the result of dc_update_planes_and_stream()
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
                                                    int update_type,
                                                    int planes_count,
                                                    struct dc_stream_state *stream,
                                                    struct dc_stream_update *stream_update,
                                                    struct dc_surface_update *array_of_surface_update)
{
        reverse_planes_order(array_of_surface_update, planes_count);

        /*
         * Previous frame finished and HW is ready for optimization.
         */
        if (update_type == UPDATE_TYPE_FAST)
                dc_post_update_surfaces_to_stream(dc);

        return dc_update_planes_and_stream(dc,
                                           array_of_surface_update,
                                           planes_count,
                                           stream,
                                           stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct drm_device *dev = adev_to_drm(adev);
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                drm_dbg_state(dev, "CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_dbg_state(dev,
                              "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                              amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
                              amdgpu_crtc->crtc_id, amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        drm_dbg_state(dev,
                      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}

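/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of front-porch, where vblank timestamping
 * gives valid results. Also runs the BTR processing for pre-DCE12 ASICs.
 */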
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                drm_dbg_vbl(drm_dev,
                            "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                            vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        amdgpu_dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct drm_writeback_job *job;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        if (acrtc->wb_pending) {
                if (acrtc->wb_conn) {
                        spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
                        job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
                                                       struct drm_writeback_job,
                                                       list_entry);
                        spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

                        if (job)
                                drm_writeback_signal_completion(acrtc->wb_conn, 0);
                } else
                        DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
                acrtc->wb_pending = false;
        }

        vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

        drm_dbg_vbl(adev_to_drm(adev),
                    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                    vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                amdgpu_dm_crtc_handle_vblank(acrtc);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the completion event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Looks up the display connector
 * through the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        u8 link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index > adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {

                if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        if (notify->type == DMUB_NOTIFICATION_HPD)
                                DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                                DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
                        else
                                DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
                                                notify->type, link_index);

                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL
 * or the notification type is out of range
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}

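/**
 * dm_handle_hpd_work() - Deferred handler for offloaded DMUB HPD notifications
 * @work: work struct embedded in &struct dmub_hpd_work
 *
 * Runs the registered DMUB notification callback outside of interrupt
 * context, then frees the notification copy and the work item itself.
 */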
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB notifications
 * to their registered callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
                                                                    GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;
        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {

                if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

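/* Notify the HDA audio component that the ELD contents for @pin changed. */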
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

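/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * clears the mailbox, trace buffer and firmware state regions, programs the
 * hardware parameters and waits for the firmware auto-load to finish.
 *
 * Return: 0 on success or when DMUB is not supported, negative errno
 * otherwise.
 */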
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dc_context *ctx = adev->dm.dc->ctx;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        /* initialize register offsets for ASICs with runtime initialization available */
        if (dmub_srv->hw_funcs.init_reg_offsets)
                dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load the fw_inst_const part of
         * the dmub firmware to CW0; otherwise, the firmware backdoor load
         * is done here by dm_dmub_hw_init.
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->vm_manager.vram_base_offset;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
        case IP_VERSION(3, 5, 0):
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

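/**
 * dm_dmub_hw_resume() - Bring DMUB hardware back up
 * @adev: amdgpu_device pointer
 *
 * If the DMUB is still initialized, only waits for the firmware auto-load
 * to finish; otherwise performs the full dm_dmub_hw_init() sequence.
 */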
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

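/**
 * mmhub_read_system_context() - Fill a DC physical address space config
 * @adev: amdgpu_device pointer
 * @pa_config: configuration to fill in
 *
 * Derives the system aperture, AGP aperture and GART page table ranges from
 * the current GMC state so DC can program its address space registers.
 */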
1269 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1270 {
1271         u64 pt_base;
1272         u32 logical_addr_low;
1273         u32 logical_addr_high;
1274         u32 agp_base, agp_bot, agp_top;
1275         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1276
1277         memset(pa_config, 0, sizeof(*pa_config));
1278
1279         agp_base = 0;
1280         agp_bot = adev->gmc.agp_start >> 24;
1281         agp_top = adev->gmc.agp_end >> 24;
1282
1283         /* AGP aperture is disabled */
1284         if (agp_bot > agp_top) {
1285                 logical_addr_low = adev->gmc.fb_start >> 18;
1286                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1287                         /*
1288                          * Raven2 has a HW issue that it is unable to use the vram which
1289                          * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1290                          * workaround that increase system aperture high address (add 1)
1291                          * to get rid of the VM fault and hardware hang.
1292                          */
1293                         logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
1294                 else
1295                         logical_addr_high = adev->gmc.fb_end >> 18;
1296         } else {
1297                 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1298                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1299                         /*
1300                          * Raven2 has a HW issue that it is unable to use the vram which
1301                          * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1302                          * workaround that increase system aperture high address (add 1)
1303                          * to get rid of the VM fault and hardware hang.
1304                          */
1305                         logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1306                 else
1307                         logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1308         }
1309
1310         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1311
1312         page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1313                                                    AMDGPU_GPU_PAGE_SHIFT);
1314         page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1315                                                   AMDGPU_GPU_PAGE_SHIFT);
1316         page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1317                                                  AMDGPU_GPU_PAGE_SHIFT);
1318         page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1319                                                 AMDGPU_GPU_PAGE_SHIFT);
1320         page_table_base.high_part = upper_32_bits(pt_base);
1321         page_table_base.low_part = lower_32_bits(pt_base);
1322
1323         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1324         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1325
1326         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1327         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1328         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1329
1330         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1331         pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
1332         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1333
1334         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1335         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1336         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1337
1338         pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
1339
1340 }
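
/*
 * A worked example of the unit conversions above (illustrative): the
 * logical_addr_* values are kept in 256 KiB units (the >> 18 here is
 * undone by the << 18 into start_addr/end_addr), agp_* in 16 MiB units
 * (>> 24 / << 24), and the GART page table addresses in 4 KiB pages
 * (<< 12). An fb_start of 0x40000000 (1 GiB), for instance, yields
 * logical_addr_low = 0x40000000 >> 18 = 0x1000, which start_addr
 * reconstructs as (uint64_t)0x1000 << 18 = 0x40000000.
 */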
1341
1342 static void force_connector_state(
1343         struct amdgpu_dm_connector *aconnector,
1344         enum drm_connector_force force_state)
1345 {
1346         struct drm_connector *connector = &aconnector->base;
1347
1348         mutex_lock(&connector->dev->mode_config.mutex);
1349         aconnector->base.force = force_state;
1350         mutex_unlock(&connector->dev->mode_config.mutex);
1351
1352         mutex_lock(&aconnector->hpd_lock);
1353         drm_kms_helper_connector_hotplug_event(connector);
1354         mutex_unlock(&aconnector->hpd_lock);
1355 }
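
/*
 * Note that toggling DRM_FORCE_OFF and then DRM_FORCE_UNSPECIFIED (as
 * dm_handle_hpd_rx_offload_work() below does, with a 100 ms pause in
 * between) emulates a physical unplug/replug so the connector is fully
 * re-probed after an automated test changes the link timing.
 */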
1356
1357 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1358 {
1359         struct hpd_rx_irq_offload_work *offload_work;
1360         struct amdgpu_dm_connector *aconnector;
1361         struct dc_link *dc_link;
1362         struct amdgpu_device *adev;
1363         enum dc_connection_type new_connection_type = dc_connection_none;
1364         unsigned long flags;
1365         union test_response test_response;
1366
1367         memset(&test_response, 0, sizeof(test_response));
1368
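        /*
         * Recover the enclosing hpd_rx_irq_offload_work from its embedded
         * work_struct; the IRQ payload and the owning work queue travel
         * inside that wrapper.
         */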
1369         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1370         aconnector = offload_work->offload_wq->aconnector;
1371
1372         if (!aconnector) {
1373                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1374                 goto skip;
1375         }
1376
1377         adev = drm_to_adev(aconnector->base.dev);
1378         dc_link = aconnector->dc_link;
1379
1380         mutex_lock(&aconnector->hpd_lock);
1381         if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
1382                 DRM_ERROR("KMS: Failed to detect connector\n");
1383         mutex_unlock(&aconnector->hpd_lock);
1384
1385         if (new_connection_type == dc_connection_none)
1386                 goto skip;
1387
1388         if (amdgpu_in_reset(adev))
1389                 goto skip;
1390
1391         if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1392                 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1393                 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1394                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1395                 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1396                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1397                 goto skip;
1398         }
1399
1400         mutex_lock(&adev->dm.dc_lock);
1401         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1402                 dc_link_dp_handle_automated_test(dc_link);
1403
1404                 if (aconnector->timing_changed) {
1405                         /* force connector disconnect and reconnect */
1406                         force_connector_state(aconnector, DRM_FORCE_OFF);
1407                         msleep(100);
1408                         force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1409                 }
1410
1411                 test_response.bits.ACK = 1;
1412
1413                 core_link_write_dpcd(
1414                 dc_link,
1415                 DP_TEST_RESPONSE,
1416                 &test_response.raw,
1417                 sizeof(test_response));
1418         } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1419                         dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1420                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1421                 /* offload_work->data comes from handle_hpd_rx_irq()->
1422                  * schedule_hpd_rx_offload_work(); this is the deferred handler
1423                  * for an hpd short pulse. By this point the link status may
1424                  * have changed, so fetch the latest link status from the dpcd
1425                  * registers. If the link status is good, skip re-running link
1426                  * training.
1427                  */
1428                 union hpd_irq_data irq_data;
1429
1430                 memset(&irq_data, 0, sizeof(irq_data));
1431
1432                 /* Before dc_link_dp_handle_link_loss(), allow a new link-loss
1433                  * handling request to be added to the work queue in case the link
1434                  * is lost again at the end of dc_link_dp_handle_link_loss()
1435                  */
1436                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1437                 offload_work->offload_wq->is_handling_link_loss = false;
1438                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1439
1440                 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1441                         dc_link_check_link_loss_status(dc_link, &irq_data))
1442                         dc_link_dp_handle_link_loss(dc_link);
1443         }
1444         mutex_unlock(&adev->dm.dc_lock);
1445
1446 skip:
1447         kfree(offload_work);
1448
1449 }
1450
1451 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1452 {
1453         int max_caps = dc->caps.max_links;
1454         int i = 0;
1455         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1456
1457         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1458
1459         if (!hpd_rx_offload_wq)
1460                 return NULL;
1461
1462
1463         for (i = 0; i < max_caps; i++) {
1464                 hpd_rx_offload_wq[i].wq =
1465                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1466
1467                 if (hpd_rx_offload_wq[i].wq == NULL) {
1468                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
1469                         goto out_err;
1470                 }
1471
1472                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1473         }
1474
1475         return hpd_rx_offload_wq;
1476
1477 out_err:
1478         for (i = 0; i < max_caps; i++) {
1479                 if (hpd_rx_offload_wq[i].wq)
1480                         destroy_workqueue(hpd_rx_offload_wq[i].wq);
1481         }
1482         kfree(hpd_rx_offload_wq);
1483         return NULL;
1484 }
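
/*
 * The out_err path above only unwinds the queues created so far; on
 * success the per-link queues live until amdgpu_dm_fini(), which
 * destroys each workqueue and frees the array.
 */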
1485
1486 struct amdgpu_stutter_quirk {
1487         u16 chip_vendor;
1488         u16 chip_device;
1489         u16 subsys_vendor;
1490         u16 subsys_device;
1491         u8 revision;
1492 };
1493
1494 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1495         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1496         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1497         { 0, 0, 0, 0, 0 },
1498 };
1499
1500 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1501 {
1502         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1503
1504         while (p && p->chip_device != 0) {
1505                 if (pdev->vendor == p->chip_vendor &&
1506                     pdev->device == p->chip_device &&
1507                     pdev->subsystem_vendor == p->subsys_vendor &&
1508                     pdev->subsystem_device == p->subsys_device &&
1509                     pdev->revision == p->revision) {
1510                         return true;
1511                 }
1512                 ++p;
1513         }
1514         return false;
1515 }
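
/*
 * A sketch of extending the quirk list above (the IDs here are
 * hypothetical placeholders, not a real board): an entry is matched on
 * PCI vendor/device, subsystem vendor/device, and revision, so a new
 * quirk would be added before the all-zero terminator as e.g.
 *
 *      { 0x1002, 0x15d8, 0x1043, 0x8765, 0xc1 },
 */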
1516
1517 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1518         {
1519                 .matches = {
1520                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1521                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1522                 },
1523         },
1524         {
1525                 .matches = {
1526                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1527                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1528                 },
1529         },
1530         {
1531                 .matches = {
1532                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1533                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1534                 },
1535         },
1536         {
1537                 .matches = {
1538                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1539                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1540                 },
1541         },
1542         {
1543                 .matches = {
1544                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1545                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1546                 },
1547         },
1548         {
1549                 .matches = {
1550                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1551                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1552                 },
1553         },
1554         {
1555                 .matches = {
1556                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1557                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1558                 },
1559         },
1560         {
1561                 .matches = {
1562                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1563                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1564                 },
1565         },
1566         {
1567                 .matches = {
1568                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1569                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1570                 },
1571         },
1572         {}
1573         /* TODO: refactor this from a fixed table to a dynamic option */
1574 };
1575
1576 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1577 {
1578         const struct dmi_system_id *dmi_id;
1579
1580         dm->aux_hpd_discon_quirk = false;
1581
1582         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1583         if (dmi_id) {
1584                 dm->aux_hpd_discon_quirk = true;
1585                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1586         }
1587 }
1588
1589 static int amdgpu_dm_init(struct amdgpu_device *adev)
1590 {
1591         struct dc_init_data init_data;
1592         struct dc_callback_init init_params;
1593         int r;
1594
1595         adev->dm.ddev = adev_to_drm(adev);
1596         adev->dm.adev = adev;
1597
1598         /* Zero all the fields */
1599         memset(&init_data, 0, sizeof(init_data));
1600         memset(&init_params, 0, sizeof(init_params));
1601
1602         mutex_init(&adev->dm.dpia_aux_lock);
1603         mutex_init(&adev->dm.dc_lock);
1604         mutex_init(&adev->dm.audio_lock);
1605
1606         if (amdgpu_dm_irq_init(adev)) {
1607                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1608                 goto error;
1609         }
1610
1611         init_data.asic_id.chip_family = adev->family;
1612
1613         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1614         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1615         init_data.asic_id.chip_id = adev->pdev->device;
1616
1617         init_data.asic_id.vram_width = adev->gmc.vram_width;
1618         /* TODO: initialize init_data.asic_id.vram_type here */
1619         init_data.asic_id.atombios_base_address =
1620                 adev->mode_info.atom_context->bios;
1621
1622         init_data.driver = adev;
1623
1624         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1625
1626         if (!adev->dm.cgs_device) {
1627                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1628                 goto error;
1629         }
1630
1631         init_data.cgs_device = adev->dm.cgs_device;
1632
1633         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1634
1635         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1636         case IP_VERSION(2, 1, 0):
1637                 switch (adev->dm.dmcub_fw_version) {
1638                 case 0: /* development */
1639                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1640                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1641                         init_data.flags.disable_dmcu = false;
1642                         break;
1643                 default:
1644                         init_data.flags.disable_dmcu = true;
1645                 }
1646                 break;
1647         case IP_VERSION(2, 0, 3):
1648                 init_data.flags.disable_dmcu = true;
1649                 break;
1650         default:
1651                 break;
1652         }
1653
1654         /* APUs support S/G display by default, except:
1655          * ASICs before Carrizo,
1656          * RAVEN1 (users reported stability issues)
1657          */
1658
1659         if (adev->asic_type < CHIP_CARRIZO) {
1660                 init_data.flags.gpu_vm_support = false;
1661         } else if (adev->asic_type == CHIP_RAVEN) {
1662                 if (adev->apu_flags & AMD_APU_IS_RAVEN)
1663                         init_data.flags.gpu_vm_support = false;
1664                 else
1665                         init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
1666         } else {
1667                 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
1668         }
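
        /*
         * Net effect of the ladder above: S/G display stays off for
         * pre-Carrizo ASICs and RAVEN1, follows the amdgpu_sg_display
         * module parameter on other CHIP_RAVEN variants (Raven2/Picasso),
         * and is otherwise enabled only for APUs when amdgpu_sg_display
         * is nonzero; dGPUs never set gpu_vm_support here.
         */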
1669
1670         adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
1671
1672         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1673                 init_data.flags.fbc_support = true;
1674
1675         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1676                 init_data.flags.multi_mon_pp_mclk_switch = true;
1677
1678         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1679                 init_data.flags.disable_fractional_pwm = true;
1680
1681         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1682                 init_data.flags.edp_no_power_sequencing = true;
1683
1684         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1685                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1686         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1687                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1688
1689         init_data.flags.seamless_boot_edp_requested = false;
1690
1691         if (amdgpu_device_seamless_boot_supported(adev)) {
1692                 init_data.flags.seamless_boot_edp_requested = true;
1693                 init_data.flags.allow_seamless_boot_optimization = true;
1694                 DRM_INFO("Seamless boot condition check passed\n");
1695         }
1696
1697         init_data.flags.enable_mipi_converter_optimization = true;
1698
1699         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1700         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1701         init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
1702
1703         /* Enable DWB for tested platforms only */
1704         if (adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0))
1705                 init_data.num_virtual_links = 1;
1706
1707         INIT_LIST_HEAD(&adev->dm.da_list);
1708
1709         retrieve_dmi_info(&adev->dm);
1710
1711         /* Display Core create. */
1712         adev->dm.dc = dc_create(&init_data);
1713
1714         if (adev->dm.dc) {
1715                 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
1716                          dce_version_to_string(adev->dm.dc->ctx->dce_version));
1717         } else {
1718                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1719                 goto error;
1720         }
1721
1722         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1723                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1724                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1725         }
1726
1727         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1728                 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1729         if (dm_should_disable_stutter(adev->pdev))
1730                 adev->dm.dc->debug.disable_stutter = true;
1731
1732         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1733                 adev->dm.dc->debug.disable_stutter = true;
1734
1735         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1736                 adev->dm.dc->debug.disable_dsc = true;
1737
1738         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1739                 adev->dm.dc->debug.disable_clock_gate = true;
1740
1741         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1742                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1743
1744         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1745
1746         /* TODO: Remove after DP2 receiver gets proper support for the Cable ID feature */
1747         adev->dm.dc->debug.ignore_cable_id = true;
1748
1749         if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1750                 DRM_INFO("DP-HDMI FRL PCON supported\n");
1751
1752         r = dm_dmub_hw_init(adev);
1753         if (r) {
1754                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1755                 goto error;
1756         }
1757
1758         dc_hardware_init(adev->dm.dc);
1759
1760         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1761         if (!adev->dm.hpd_rx_offload_wq) {
1762                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1763                 goto error;
1764         }
1765
1766         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1767                 struct dc_phy_addr_space_config pa_config;
1768
1769                 mmhub_read_system_context(adev, &pa_config);
1770
1771                 // Call the DC init_memory func
1772                 dc_setup_system_context(adev->dm.dc, &pa_config);
1773         }
1774
1775         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1776         if (!adev->dm.freesync_module) {
1777                 DRM_ERROR(
1778                 "amdgpu: failed to initialize freesync_module.\n");
1779         } else
1780                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1781                                 adev->dm.freesync_module);
1782
1783         amdgpu_dm_init_color_mod();
1784
1785         if (adev->dm.dc->caps.max_links > 0) {
1786                 adev->dm.vblank_control_workqueue =
1787                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1788                 if (!adev->dm.vblank_control_workqueue)
1789                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1790         }
1791
1792         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1793                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1794
1795                 if (!adev->dm.hdcp_workqueue)
1796                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1797                 else
1798                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1799
1800                 dc_init_callbacks(adev->dm.dc, &init_params);
1801         }
1802         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1803                 init_completion(&adev->dm.dmub_aux_transfer_done);
1804                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1805                 if (!adev->dm.dmub_notify) {
1806                         DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1807                         goto error;
1808                 }
1809
1810                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1811                 if (!adev->dm.delayed_hpd_wq) {
1812                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1813                         goto error;
1814                 }
1815
1816                 amdgpu_dm_outbox_init(adev);
1817                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1818                         dmub_aux_setconfig_callback, false)) {
1819                         DRM_ERROR("amdgpu: failed to register dmub aux callback");
1820                         goto error;
1821                 }
1822                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1823                         DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1824                         goto error;
1825                 }
1826                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1827                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1828                         goto error;
1829                 }
1830         }
1831
1832         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1833          * It is expected that DMUB will resend any pending notifications at this point, for
1834          * example HPD from DPIA.
1835          */
1836         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1837                 dc_enable_dmub_outbox(adev->dm.dc);
1838
1839                 /* DPIA trace goes to dmesg logs only if outbox is enabled */
1840                 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
1841                         dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
1842         }
1843
1844         if (amdgpu_dm_initialize_drm_device(adev)) {
1845                 DRM_ERROR(
1846                 "amdgpu: failed to initialize sw for display support.\n");
1847                 goto error;
1848         }
1849
1850         /* create fake encoders for MST */
1851         dm_dp_create_fake_mst_encoders(adev);
1852
1853         /* TODO: Add_display_info? */
1854
1855         /* TODO use dynamic cursor width */
1856         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1857         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1858
1859         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1860                 DRM_ERROR(
1861                 "amdgpu: failed to initialize sw for display support.\n");
1862                 goto error;
1863         }
1864
1865 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1866         adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1867         if (!adev->dm.secure_display_ctxs)
1868                 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
1869 #endif
1870
1871         DRM_DEBUG_DRIVER("KMS initialized.\n");
1872
1873         return 0;
1874 error:
1875         amdgpu_dm_fini(adev);
1876
1877         return -EINVAL;
1878 }
1879
1880 static int amdgpu_dm_early_fini(void *handle)
1881 {
1882         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1883
1884         amdgpu_dm_audio_fini(adev);
1885
1886         return 0;
1887 }
1888
1889 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1890 {
1891         int i;
1892
1893         if (adev->dm.vblank_control_workqueue) {
1894                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1895                 adev->dm.vblank_control_workqueue = NULL;
1896         }
1897
1898         amdgpu_dm_destroy_drm_device(&adev->dm);
1899
1900 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1901         if (adev->dm.secure_display_ctxs) {
1902                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1903                         if (adev->dm.secure_display_ctxs[i].crtc) {
1904                                 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1905                                 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1906                         }
1907                 }
1908                 kfree(adev->dm.secure_display_ctxs);
1909                 adev->dm.secure_display_ctxs = NULL;
1910         }
1911 #endif
1912         if (adev->dm.hdcp_workqueue) {
1913                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1914                 adev->dm.hdcp_workqueue = NULL;
1915         }
1916
1917         if (adev->dm.dc)
1918                 dc_deinit_callbacks(adev->dm.dc);
1919
1920         if (adev->dm.dc)
1921                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1922
1923         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1924                 kfree(adev->dm.dmub_notify);
1925                 adev->dm.dmub_notify = NULL;
1926                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1927                 adev->dm.delayed_hpd_wq = NULL;
1928         }
1929
1930         if (adev->dm.dmub_bo)
1931                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1932                                       &adev->dm.dmub_bo_gpu_addr,
1933                                       &adev->dm.dmub_bo_cpu_addr);
1934
1935         if (adev->dm.hpd_rx_offload_wq) {
1936                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1937                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1938                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1939                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1940                         }
1941                 }
1942
1943                 kfree(adev->dm.hpd_rx_offload_wq);
1944                 adev->dm.hpd_rx_offload_wq = NULL;
1945         }
1946
1947         /* DC Destroy TODO: Replace destroy DAL */
1948         if (adev->dm.dc)
1949                 dc_destroy(&adev->dm.dc);
1950         /*
1951          * TODO: pageflip, vblank interrupt
1952          *
1953          * amdgpu_dm_irq_fini(adev);
1954          */
1955
1956         if (adev->dm.cgs_device) {
1957                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1958                 adev->dm.cgs_device = NULL;
1959         }
1960         if (adev->dm.freesync_module) {
1961                 mod_freesync_destroy(adev->dm.freesync_module);
1962                 adev->dm.freesync_module = NULL;
1963         }
1964
1965         mutex_destroy(&adev->dm.audio_lock);
1966         mutex_destroy(&adev->dm.dc_lock);
1967         mutex_destroy(&adev->dm.dpia_aux_lock);
1968 }
1969
1970 static int load_dmcu_fw(struct amdgpu_device *adev)
1971 {
1972         const char *fw_name_dmcu = NULL;
1973         int r;
1974         const struct dmcu_firmware_header_v1_0 *hdr;
1975
1976         switch (adev->asic_type) {
1977 #if defined(CONFIG_DRM_AMD_DC_SI)
1978         case CHIP_TAHITI:
1979         case CHIP_PITCAIRN:
1980         case CHIP_VERDE:
1981         case CHIP_OLAND:
1982 #endif
1983         case CHIP_BONAIRE:
1984         case CHIP_HAWAII:
1985         case CHIP_KAVERI:
1986         case CHIP_KABINI:
1987         case CHIP_MULLINS:
1988         case CHIP_TONGA:
1989         case CHIP_FIJI:
1990         case CHIP_CARRIZO:
1991         case CHIP_STONEY:
1992         case CHIP_POLARIS11:
1993         case CHIP_POLARIS10:
1994         case CHIP_POLARIS12:
1995         case CHIP_VEGAM:
1996         case CHIP_VEGA10:
1997         case CHIP_VEGA12:
1998         case CHIP_VEGA20:
1999                 return 0;
2000         case CHIP_NAVI12:
2001                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
2002                 break;
2003         case CHIP_RAVEN:
2004                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
2005                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2006                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
2007                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2008                 else
2009                         return 0;
2010                 break;
2011         default:
2012                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2013                 case IP_VERSION(2, 0, 2):
2014                 case IP_VERSION(2, 0, 3):
2015                 case IP_VERSION(2, 0, 0):
2016                 case IP_VERSION(2, 1, 0):
2017                 case IP_VERSION(3, 0, 0):
2018                 case IP_VERSION(3, 0, 2):
2019                 case IP_VERSION(3, 0, 3):
2020                 case IP_VERSION(3, 0, 1):
2021                 case IP_VERSION(3, 1, 2):
2022                 case IP_VERSION(3, 1, 3):
2023                 case IP_VERSION(3, 1, 4):
2024                 case IP_VERSION(3, 1, 5):
2025                 case IP_VERSION(3, 1, 6):
2026                 case IP_VERSION(3, 2, 0):
2027                 case IP_VERSION(3, 2, 1):
2028                 case IP_VERSION(3, 5, 0):
2029                         return 0;
2030                 default:
2031                         break;
2032                 }
2033                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2034                 return -EINVAL;
2035         }
2036
2037         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2038                 DRM_DEBUG_KMS("dm: DMCU firmware is not supported with direct or SMU loading\n");
2039                 return 0;
2040         }
2041
2042         r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
2043         if (r == -ENODEV) {
2044                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2045                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2046                 adev->dm.fw_dmcu = NULL;
2047                 return 0;
2048         }
2049         if (r) {
2050                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
2051                         fw_name_dmcu);
2052                 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2053                 return r;
2054         }
2055
2056         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2057         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2058         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2059         adev->firmware.fw_size +=
2060                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2061
2062         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2063         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2064         adev->firmware.fw_size +=
2065                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
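
        /*
         * The DMCU image is thus split for PSP loading: the ERAM piece is
         * the total ucode size minus the interrupt-vector (intv) piece,
         * and each piece is page-aligned when accounted into fw_size.
         */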
2066
2067         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2068
2069         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2070
2071         return 0;
2072 }
2073
2074 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2075 {
2076         struct amdgpu_device *adev = ctx;
2077
2078         return dm_read_reg(adev->dm.dc->ctx, address);
2079 }
2080
2081 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2082                                      uint32_t value)
2083 {
2084         struct amdgpu_device *adev = ctx;
2085
2086         return dm_write_reg(adev->dm.dc->ctx, address, value);
2087 }
2088
2089 static int dm_dmub_sw_init(struct amdgpu_device *adev)
2090 {
2091         struct dmub_srv_create_params create_params;
2092         struct dmub_srv_region_params region_params;
2093         struct dmub_srv_region_info region_info;
2094         struct dmub_srv_memory_params memory_params;
2095         struct dmub_srv_fb_info *fb_info;
2096         struct dmub_srv *dmub_srv;
2097         const struct dmcub_firmware_header_v1_0 *hdr;
2098         enum dmub_asic dmub_asic;
2099         enum dmub_status status;
2100         int r;
2101
2102         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2103         case IP_VERSION(2, 1, 0):
2104                 dmub_asic = DMUB_ASIC_DCN21;
2105                 break;
2106         case IP_VERSION(3, 0, 0):
2107                 dmub_asic = DMUB_ASIC_DCN30;
2108                 break;
2109         case IP_VERSION(3, 0, 1):
2110                 dmub_asic = DMUB_ASIC_DCN301;
2111                 break;
2112         case IP_VERSION(3, 0, 2):
2113                 dmub_asic = DMUB_ASIC_DCN302;
2114                 break;
2115         case IP_VERSION(3, 0, 3):
2116                 dmub_asic = DMUB_ASIC_DCN303;
2117                 break;
2118         case IP_VERSION(3, 1, 2):
2119         case IP_VERSION(3, 1, 3):
2120                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2121                 break;
2122         case IP_VERSION(3, 1, 4):
2123                 dmub_asic = DMUB_ASIC_DCN314;
2124                 break;
2125         case IP_VERSION(3, 1, 5):
2126                 dmub_asic = DMUB_ASIC_DCN315;
2127                 break;
2128         case IP_VERSION(3, 1, 6):
2129                 dmub_asic = DMUB_ASIC_DCN316;
2130                 break;
2131         case IP_VERSION(3, 2, 0):
2132                 dmub_asic = DMUB_ASIC_DCN32;
2133                 break;
2134         case IP_VERSION(3, 2, 1):
2135                 dmub_asic = DMUB_ASIC_DCN321;
2136                 break;
2137         case IP_VERSION(3, 5, 0):
2138                 dmub_asic = DMUB_ASIC_DCN35;
2139                 break;
2140         default:
2141                 /* ASIC doesn't support DMUB. */
2142                 return 0;
2143         }
2144
2145         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2146         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2147
2148         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2149                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2150                         AMDGPU_UCODE_ID_DMCUB;
2151                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2152                         adev->dm.dmub_fw;
2153                 adev->firmware.fw_size +=
2154                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2155
2156                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2157                          adev->dm.dmcub_fw_version);
2158         }
2159
2160
2161         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2162         dmub_srv = adev->dm.dmub_srv;
2163
2164         if (!dmub_srv) {
2165                 DRM_ERROR("Failed to allocate DMUB service!\n");
2166                 return -ENOMEM;
2167         }
2168
2169         memset(&create_params, 0, sizeof(create_params));
2170         create_params.user_ctx = adev;
2171         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2172         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2173         create_params.asic = dmub_asic;
2174
2175         /* Create the DMUB service. */
2176         status = dmub_srv_create(dmub_srv, &create_params);
2177         if (status != DMUB_STATUS_OK) {
2178                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2179                 return -EINVAL;
2180         }
2181
2182         /* Calculate the size of all the regions for the DMUB service. */
2183         memset(&region_params, 0, sizeof(region_params));
2184
2185         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2186                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2187         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2188         region_params.vbios_size = adev->bios_size;
2189         region_params.fw_bss_data = region_params.bss_data_size ?
2190                 adev->dm.dmub_fw->data +
2191                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2192                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2193         region_params.fw_inst_const =
2194                 adev->dm.dmub_fw->data +
2195                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2196                 PSP_HEADER_BYTES;
2197         region_params.is_mailbox_in_inbox = false;
2198
2199         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2200                                            &region_info);
2201
2202         if (status != DMUB_STATUS_OK) {
2203                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2204                 return -EINVAL;
2205         }
2206
2207         /*
2208          * Allocate a framebuffer based on the total size of all the regions.
2209          * TODO: Move this into GART.
2210          */
2211         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2212                                     AMDGPU_GEM_DOMAIN_VRAM |
2213                                     AMDGPU_GEM_DOMAIN_GTT,
2214                                     &adev->dm.dmub_bo,
2215                                     &adev->dm.dmub_bo_gpu_addr,
2216                                     &adev->dm.dmub_bo_cpu_addr);
2217         if (r)
2218                 return r;
2219
2220         /* Rebase the regions on the framebuffer address. */
2221         memset(&memory_params, 0, sizeof(memory_params));
2222         memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2223         memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2224         memory_params.region_info = &region_info;
2225
2226         adev->dm.dmub_fb_info =
2227                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2228         fb_info = adev->dm.dmub_fb_info;
2229
2230         if (!fb_info) {
2231                 DRM_ERROR(
2232                         "Failed to allocate framebuffer info for DMUB service!\n");
2233                 return -ENOMEM;
2234         }
2235
2236         status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
2237         if (status != DMUB_STATUS_OK) {
2238                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2239                 return -EINVAL;
2240         }
2241
2242         return 0;
2243 }
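
/*
 * In short, dm_dmub_sw_init() sizes the DMUB regions from the firmware
 * header, carves them out of a single VRAM/GTT buffer object, and
 * records the per-region CPU/GPU addresses in dmub_fb_info; the
 * hardware init path (dm_dmub_hw_init(), called from amdgpu_dm_init()
 * above) consumes that info when bringing the firmware up.
 */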
2244
2245 static int dm_sw_init(void *handle)
2246 {
2247         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2248         int r;
2249
2250         r = dm_dmub_sw_init(adev);
2251         if (r)
2252                 return r;
2253
2254         return load_dmcu_fw(adev);
2255 }
2256
2257 static int dm_sw_fini(void *handle)
2258 {
2259         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2260
2261         kfree(adev->dm.dmub_fb_info);
2262         adev->dm.dmub_fb_info = NULL;
2263
2264         if (adev->dm.dmub_srv) {
2265                 dmub_srv_destroy(adev->dm.dmub_srv);
2266                 adev->dm.dmub_srv = NULL;
2267         }
2268
2269         amdgpu_ucode_release(&adev->dm.dmub_fw);
2270         amdgpu_ucode_release(&adev->dm.fw_dmcu);
2271
2272         return 0;
2273 }
2274
2275 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2276 {
2277         struct amdgpu_dm_connector *aconnector;
2278         struct drm_connector *connector;
2279         struct drm_connector_list_iter iter;
2280         int ret = 0;
2281
2282         drm_connector_list_iter_begin(dev, &iter);
2283         drm_for_each_connector_iter(connector, &iter) {
2284
2285                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2286                         continue;
2287
2288                 aconnector = to_amdgpu_dm_connector(connector);
2289                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2290                     aconnector->mst_mgr.aux) {
2291                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2292                                          aconnector,
2293                                          aconnector->base.base.id);
2294
2295                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2296                         if (ret < 0) {
2297                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2298                                 aconnector->dc_link->type =
2299                                         dc_connection_single;
2300                                 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2301                                                                      aconnector->dc_link);
2302                                 break;
2303                         }
2304                 }
2305         }
2306         drm_connector_list_iter_end(&iter);
2307
2308         return ret;
2309 }
2310
2311 static int dm_late_init(void *handle)
2312 {
2313         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2314
2315         struct dmcu_iram_parameters params;
2316         unsigned int linear_lut[16];
2317         int i;
2318         struct dmcu *dmcu = NULL;
2319
2320         dmcu = adev->dm.dc->res_pool->dmcu;
2321
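        /* 0xFFFF / 15 is exactly 0x1111, so this produces the evenly
         * spaced ramp 0x0000, 0x1111, 0x2222, ..., 0xFFFF.
         */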
2322         for (i = 0; i < 16; i++)
2323                 linear_lut[i] = 0xFFFF * i / 15;
2324
2325         params.set = 0;
2326         params.backlight_ramping_override = false;
2327         params.backlight_ramping_start = 0xCCCC;
2328         params.backlight_ramping_reduction = 0xCCCCCCCC;
2329         params.backlight_lut_array_size = 16;
2330         params.backlight_lut_array = linear_lut;
2331
2332         /* Min backlight level after ABM reduction; don't allow below 1%:
2333          * 0xFFFF * 0.01 = 0x28F
2334          */
2335         params.min_abm_backlight = 0x28F;
2336         /* Where ABM is implemented on dmcub, the dmcu
2337          * object will be null.
2338          * ABM 2.4 and up are implemented on dmcub.
2339          */
2340         if (dmcu) {
2341                 if (!dmcu_load_iram(dmcu, params))
2342                         return -EINVAL;
2343         } else if (adev->dm.dc->ctx->dmub_srv) {
2344                 struct dc_link *edp_links[MAX_NUM_EDP];
2345                 int edp_num;
2346
2347                 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2348                 for (i = 0; i < edp_num; i++) {
2349                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2350                                 return -EINVAL;
2351                 }
2352         }
2353
2354         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2355 }
2356
2357 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2358 {
2359         int ret;
2360         u8 guid[16];
2361         u64 tmp64;
2362
2363         mutex_lock(&mgr->lock);
2364         if (!mgr->mst_primary)
2365                 goto out_fail;
2366
2367         if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2368                 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2369                 goto out_fail;
2370         }
2371
2372         ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2373                                  DP_MST_EN |
2374                                  DP_UP_REQ_EN |
2375                                  DP_UPSTREAM_IS_SRC);
2376         if (ret < 0) {
2377                 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2378                 goto out_fail;
2379         }
2380
2381         /* Some hubs forget their guids after they resume */
2382         ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2383         if (ret != 16) {
2384                 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2385                 goto out_fail;
2386         }
2387
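        /*
         * An all-zero GUID means the hub lost it across suspend; synthesize
         * a replacement by duplicating the 64-bit jiffies value into both
         * halves of the 16-byte GUID, then write it back over DPCD.
         */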
2388         if (memchr_inv(guid, 0, 16) == NULL) {
2389                 tmp64 = get_jiffies_64();
2390                 memcpy(&guid[0], &tmp64, sizeof(u64));
2391                 memcpy(&guid[8], &tmp64, sizeof(u64));
2392
2393                 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2394
2395                 if (ret != 16) {
2396                         drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2397                         goto out_fail;
2398                 }
2399         }
2400
2401         memcpy(mgr->mst_primary->guid, guid, 16);
2402
2403 out_fail:
2404         mutex_unlock(&mgr->lock);
2405 }
2406
2407 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2408 {
2409         struct amdgpu_dm_connector *aconnector;
2410         struct drm_connector *connector;
2411         struct drm_connector_list_iter iter;
2412         struct drm_dp_mst_topology_mgr *mgr;
2413
2414         drm_connector_list_iter_begin(dev, &iter);
2415         drm_for_each_connector_iter(connector, &iter) {
2416
2417                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2418                         continue;
2419
2420                 aconnector = to_amdgpu_dm_connector(connector);
2421                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2422                     aconnector->mst_root)
2423                         continue;
2424
2425                 mgr = &aconnector->mst_mgr;
2426
2427                 if (suspend) {
2428                         drm_dp_mst_topology_mgr_suspend(mgr);
2429                 } else {
2430                         /* If the extended timeout is supported in hardware, default to
2431                          * the LTTPR timeout (3.2 ms) first as a W/A for the DP link layer
2432                          * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
2433                          */
2434                         try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2435                         if (!dp_is_lttpr_present(aconnector->dc_link))
2436                                 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2437
2438                         /* TODO: move resume_mst_branch_status() back into drm mst resume
2439                          * once the topology probing work is pulled out of mst resume into a
2440                          * second mst resume step. That second step should be called after the
2441                          * old state has been restored (i.e. after drm_atomic_helper_resume()).
2442                          */
2443                         resume_mst_branch_status(mgr);
2444                 }
2445         }
2446         drm_connector_list_iter_end(&iter);
2447 }
2448
2449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2450 {
2451         int ret = 0;
2452
2453         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2454          * depends on the Windows driver dc implementation.
2455          * For Navi1x, the clock settings of the dcn watermarks are fixed and
2456          * should be passed to smu during boot up and on resume from s3.
2457          * Boot up: dc calculates the dcn watermark clock settings within
2458          * dc_create / dcn20_resource_construct,
2459          * then calls the pplib functions below to pass the settings to smu:
2460          * smu_set_watermarks_for_clock_ranges
2461          * smu_set_watermarks_table
2462          * navi10_set_watermarks_table
2463          * smu_write_watermarks_table
2464          *
2465          * For Renoir, the clock settings of the dcn watermarks are also fixed.
2466          * dc implements a different flow for the Windows driver:
2467          * dc_hardware_init / dc_set_power_state
2468          * dcn10_init_hw
2469          * notify_wm_ranges
2470          * set_wm_ranges
2471          * -- Linux
2472          * smu_set_watermarks_for_clock_ranges
2473          * renoir_set_watermarks_table
2474          * smu_write_watermarks_table
2475          *
2476          * For Linux,
2477          * dc_hardware_init -> amdgpu_dm_init
2478          * dc_set_power_state --> dm_resume
2479          *
2480          * Therefore, this function applies to navi10/12/14 but not to
2481          * Renoir.
2482          */
2483         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2484         case IP_VERSION(2, 0, 2):
2485         case IP_VERSION(2, 0, 0):
2486                 break;
2487         default:
2488                 return 0;
2489         }
2490
2491         ret = amdgpu_dpm_write_watermarks_table(adev);
2492         if (ret) {
2493                 DRM_ERROR("Failed to update WMTABLE!\n");
2494                 return ret;
2495         }
2496
2497         return 0;
2498 }
2499
2500 /**
2501  * dm_hw_init() - Initialize DC device
2502  * @handle: The base driver device containing the amdgpu_dm device.
2503  *
2504  * Initialize the &struct amdgpu_display_manager device. This involves calling
2505  * the initializers of each DM component, then populating the struct with them.
2506  *
2507  * Although the function implies hardware initialization, both hardware and
2508  * software are initialized here. Splitting them out to their relevant init
2509  * hooks is a future TODO item.
2510  *
2511  * Some notable things that are initialized here:
2512  *
2513  * - Display Core, both software and hardware
2514  * - DC modules that we need (freesync and color management)
2515  * - DRM software states
2516  * - Interrupt sources and handlers
2517  * - Vblank support
2518  * - Debug FS entries, if enabled
2519  */
2520 static int dm_hw_init(void *handle)
2521 {
2522         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2523         /* Create DAL display manager */
2524         if (amdgpu_dm_init(adev))
                 return -EINVAL;
2525         amdgpu_dm_hpd_init(adev);
2526
2527         return 0;
2528 }
2529
2530 /**
2531  * dm_hw_fini() - Teardown DC device
2532  * @handle: The base driver device containing the amdgpu_dm device.
2533  *
2534  * Teardown components within &struct amdgpu_display_manager that require
2535  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2536  * were loaded. Also flush IRQ workqueues and disable them.
2537  */
2538 static int dm_hw_fini(void *handle)
2539 {
2540         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2541
2542         amdgpu_dm_hpd_fini(adev);
2543
2544         amdgpu_dm_irq_fini(adev);
2545         amdgpu_dm_fini(adev);
2546         return 0;
2547 }
2548
2549
2550 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2551                                  struct dc_state *state, bool enable)
2552 {
2553         enum dc_irq_source irq_source;
2554         struct amdgpu_crtc *acrtc;
2555         int rc = -EBUSY;
2556         int i = 0;
2557
2558         for (i = 0; i < state->stream_count; i++) {
2559                 acrtc = get_crtc_by_otg_inst(
2560                                 adev, state->stream_status[i].primary_otg_inst);
2561
2562                 if (acrtc && state->stream_status[i].plane_count != 0) {
2563                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2564                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2565                         if (rc)
2566                                 DRM_WARN("Failed to %s pflip interrupts\n",
2567                                          enable ? "enable" : "disable");
2568
2569                         if (enable) {
2570                                 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
2571                                         rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
2572                         } else
2573                                 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
2574
2575                         if (rc)
2576                                 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
2577
2578                         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2579                         /* During gpu-reset we disable and then enable vblank irq, so
2580                          * don't use amdgpu_irq_get/put() to avoid refcount change.
2581                          */
2582                         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
2583                                 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
2584                 }
2585         }
2586
2587 }
2588
2589 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2590 {
2591         struct dc_state *context = NULL;
2592         enum dc_status res = DC_ERROR_UNEXPECTED;
2593         int i;
2594         struct dc_stream_state *del_streams[MAX_PIPES];
2595         int del_streams_count = 0;
2596
2597         memset(del_streams, 0, sizeof(del_streams));
2598
2599         context = dc_create_state(dc);
2600         if (context == NULL)
2601                 goto context_alloc_fail;
2602
2603         dc_resource_state_copy_construct_current(dc, context);
2604
2605         /* First remove from context all streams */
2606         for (i = 0; i < context->stream_count; i++) {
2607                 struct dc_stream_state *stream = context->streams[i];
2608
2609                 del_streams[del_streams_count++] = stream;
2610         }
2611
2612         /* Remove all planes for removed streams and then remove the streams */
2613         for (i = 0; i < del_streams_count; i++) {
2614                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2615                         res = DC_FAIL_DETACH_SURFACES;
2616                         goto fail;
2617                 }
2618
2619                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2620                 if (res != DC_OK)
2621                         goto fail;
2622         }
2623
2624         res = dc_commit_streams(dc, context->streams, context->stream_count);
2625
2626 fail:
2627         dc_release_state(context);
2628
2629 context_alloc_fail:
2630         return res;
2631 }
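
/*
 * Used on the GPU-reset suspend path (see dm_suspend() below): after the
 * current dc state is cached, every plane and stream is stripped and an
 * empty context is committed so the hardware is quiesced before reset.
 */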
2632
2633 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2634 {
2635         int i;
2636
2637         if (dm->hpd_rx_offload_wq) {
2638                 for (i = 0; i < dm->dc->caps.max_links; i++)
2639                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2640         }
2641 }
2642
2643 static int dm_suspend(void *handle)
2644 {
2645         struct amdgpu_device *adev = handle;
2646         struct amdgpu_display_manager *dm = &adev->dm;
2647         int ret = 0;
2648
2649         if (amdgpu_in_reset(adev)) {
2650                 mutex_lock(&dm->dc_lock);
2651
2652                 dc_allow_idle_optimizations(adev->dm.dc, false);
2653
2654                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2655
2656                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2657
2658                 amdgpu_dm_commit_zero_streams(dm->dc);
2659
2660                 amdgpu_dm_irq_suspend(adev);
2661
2662                 hpd_rx_irq_work_suspend(dm);
2663
2664                 return ret;
2665         }
2666
2667         WARN_ON(adev->dm.cached_state);
2668         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2669         if (IS_ERR(adev->dm.cached_state))
2670                 return PTR_ERR(adev->dm.cached_state);
2671
2672         s3_handle_mst(adev_to_drm(adev), true);
2673
2674         amdgpu_dm_irq_suspend(adev);
2675
2676         hpd_rx_irq_work_suspend(dm);
2677
2678         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2679
2680         return 0;
2681 }
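
/*
 * dm_suspend() therefore has two flavors: during gpu reset it caches the
 * live dc state and commits zero streams without touching the DRM atomic
 * state, while an ordinary S3 suspend snapshots the atomic state via
 * drm_atomic_helper_suspend(), suspends MST, and puts DC into D3.
 */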
2682
2683 struct drm_connector *
2684 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2685                                              struct drm_crtc *crtc)
2686 {
2687         u32 i;
2688         struct drm_connector_state *new_con_state;
2689         struct drm_connector *connector;
2690         struct drm_crtc *crtc_from_state;
2691
2692         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2693                 crtc_from_state = new_con_state->crtc;
2694
2695                 if (crtc_from_state == crtc)
2696                         return connector;
2697         }
2698
2699         return NULL;
2700 }
2701
2702 static void emulated_link_detect(struct dc_link *link)
2703 {
2704         struct dc_sink_init_data sink_init_data = { 0 };
2705         struct display_sink_capability sink_caps = { 0 };
2706         enum dc_edid_status edid_status;
2707         struct dc_context *dc_ctx = link->ctx;
2708         struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
2709         struct dc_sink *sink = NULL;
2710         struct dc_sink *prev_sink = NULL;
2711
2712         link->type = dc_connection_none;
2713         prev_sink = link->local_sink;
2714
2715         if (prev_sink)
2716                 dc_sink_release(prev_sink);
2717
2718         switch (link->connector_signal) {
2719         case SIGNAL_TYPE_HDMI_TYPE_A: {
2720                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2721                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2722                 break;
2723         }
2724
2725         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2726                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2727                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2728                 break;
2729         }
2730
2731         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2732                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2733                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2734                 break;
2735         }
2736
2737         case SIGNAL_TYPE_LVDS: {
2738                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2739                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2740                 break;
2741         }
2742
2743         case SIGNAL_TYPE_EDP: {
2744                 sink_caps.transaction_type =
2745                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2746                 sink_caps.signal = SIGNAL_TYPE_EDP;
2747                 break;
2748         }
2749
2750         case SIGNAL_TYPE_DISPLAY_PORT: {
2751                 sink_caps.transaction_type =
2752                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2753                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2754                 break;
2755         }
2756
2757         default:
2758                 drm_err(dev, "Invalid connector type! signal:%d\n",
2759                         link->connector_signal);
2760                 return;
2761         }
2762
2763         sink_init_data.link = link;
2764         sink_init_data.sink_signal = sink_caps.signal;
2765
2766         sink = dc_sink_create(&sink_init_data);
2767         if (!sink) {
2768                 drm_err(dev, "Failed to create sink!\n");
2769                 return;
2770         }
2771
2772         /* dc_sink_create returns a new reference */
2773         link->local_sink = sink;
2774
2775         edid_status = dm_helpers_read_local_edid(
2776                         link->ctx,
2777                         link,
2778                         sink);
2779
2780         if (edid_status != EDID_OK)
2781                 drm_err(dev, "Failed to read EDID\n");
2782
2783 }
2784
2785 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2786                                      struct amdgpu_display_manager *dm)
2787 {
2788         struct {
2789                 struct dc_surface_update surface_updates[MAX_SURFACES];
2790                 struct dc_plane_info plane_infos[MAX_SURFACES];
2791                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2792                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2793                 struct dc_stream_update stream_update;
2794         } *bundle;
2795         int k, m;
2796
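        /*
         * The update bundle is too large to put on the kernel stack, so
         * allocate it from the heap for the duration of this commit.
         */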
2797         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2798
2799         if (!bundle) {
2800                 drm_err(dm->ddev, "Failed to allocate update bundle\n");
2801                 goto cleanup;
2802         }
2803
2804         for (k = 0; k < dc_state->stream_count; k++) {
2805                 bundle->stream_update.stream = dc_state->streams[k];
2806
2807                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2808                         bundle->surface_updates[m].surface =
2809                                 dc_state->stream_status[k].plane_states[m];
2810                         bundle->surface_updates[m].surface->force_full_update =
2811                                 true;
2812                 }
2813
2814                 update_planes_and_stream_adapter(dm->dc,
2815                                          UPDATE_TYPE_FULL,
2816                                          dc_state->stream_status[k].plane_count,
2817                                          dc_state->streams[k],
2818                                          &bundle->stream_update,
2819                                          bundle->surface_updates);
2820         }
2821
2822 cleanup:
2823         kfree(bundle);
2824 }
2825
2826 static int dm_resume(void *handle)
2827 {
2828         struct amdgpu_device *adev = handle;
2829         struct drm_device *ddev = adev_to_drm(adev);
2830         struct amdgpu_display_manager *dm = &adev->dm;
2831         struct amdgpu_dm_connector *aconnector;
2832         struct drm_connector *connector;
2833         struct drm_connector_list_iter iter;
2834         struct drm_crtc *crtc;
2835         struct drm_crtc_state *new_crtc_state;
2836         struct dm_crtc_state *dm_new_crtc_state;
2837         struct drm_plane *plane;
2838         struct drm_plane_state *new_plane_state;
2839         struct dm_plane_state *dm_new_plane_state;
2840         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2841         enum dc_connection_type new_connection_type = dc_connection_none;
2842         struct dc_state *dc_state;
2843         int i, r, j, ret;
2844         bool need_hotplug = false;
2845
2846         if (dm->dc->caps.ips_support) {
2847                 dc_dmub_srv_exit_low_power_state(dm->dc);
2848         }
2849
2850         if (amdgpu_in_reset(adev)) {
2851                 dc_state = dm->cached_dc_state;
2852
2853                 /*
2854                  * The dc->current_state is backed up into dm->cached_dc_state
2855                  * before we commit 0 streams.
2856                  *
2857                  * DC will clear link encoder assignments on the real state
2858                  * but the changes won't propagate over to the copy we made
2859                  * before the 0 streams commit.
2860                  *
2861                  * DC expects that link encoder assignments are *not* valid
2862                  * when committing a state, so as a workaround we copy
2863                  * the assignments from the current state.
2864                  *
2865                  * We lose the previous assignments, but we had already
2866                  * committed 0 streams anyway.
2867                  */
2868                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2869
2870                 r = dm_dmub_hw_init(adev);
2871                 if (r)
2872                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2873
2874                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2875
2876                 dc_resume(dm->dc);
2877
2878                 amdgpu_dm_irq_resume_early(adev);
2879
2880                 for (i = 0; i < dc_state->stream_count; i++) {
2881                         dc_state->streams[i]->mode_changed = true;
2882                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2883                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2884                                         = 0xffffffff;
2885                         }
2886                 }
2887
2888                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2889                         amdgpu_dm_outbox_init(adev);
2890                         dc_enable_dmub_outbox(adev->dm.dc);
2891                 }
2892
2893                 WARN_ON(dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count) != DC_OK);
2894
2895                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2896
2897                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2898
2899                 dc_release_state(dm->cached_dc_state);
2900                 dm->cached_dc_state = NULL;
2901
2902                 amdgpu_dm_irq_resume_late(adev);
2903
2904                 mutex_unlock(&dm->dc_lock);
2905
2906                 return 0;
2907         }
2908         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2909         dc_release_state(dm_state->context);
2910         dm_state->context = dc_create_state(dm->dc);
2911         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2912         dc_resource_state_construct(dm->dc, dm_state->context);
2913
2914         /* Before powering on DC we need to re-initialize DMUB. */
2915         dm_dmub_hw_resume(adev);
2916
2917         /* Re-enable outbox interrupts for DPIA. */
2918         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2919                 amdgpu_dm_outbox_init(adev);
2920                 dc_enable_dmub_outbox(adev->dm.dc);
2921         }
2922
2923         /* power on hardware */
2924         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2925
2926         /* program HPD filter */
2927         dc_resume(dm->dc);
2928
2929         /*
2930          * Enable the HPD Rx IRQ early; this must be done before the mode is
2931          * set, since short-pulse interrupts are used for MST.
2932          */
2933         amdgpu_dm_irq_resume_early(adev);
2934
2935         /* On resume we need to rewrite the MSTM control bits to enable MST */
2936         s3_handle_mst(ddev, false);
2937
2938         /* Do detection */
2939         drm_connector_list_iter_begin(ddev, &iter);
2940         drm_for_each_connector_iter(connector, &iter) {
2941
2942                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2943                         continue;
2944
2945                 aconnector = to_amdgpu_dm_connector(connector);
2946
2947                 if (!aconnector->dc_link)
2948                         continue;
2949
2950                 /*
2951                  * This is the case when traversing through already created
2952                  * end-sink MST connectors; they should be skipped.
2953                  */
2954                 if (aconnector->mst_root)
2955                         continue;
2956
2957                 mutex_lock(&aconnector->hpd_lock);
2958                 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2959                         DRM_ERROR("KMS: Failed to detect connector\n");
2960
2961                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2962                         emulated_link_detect(aconnector->dc_link);
2963                 } else {
2964                         mutex_lock(&dm->dc_lock);
2965                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2966                         mutex_unlock(&dm->dc_lock);
2967                 }
2968
2969                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2970                         aconnector->fake_enable = false;
2971
2972                 if (aconnector->dc_sink)
2973                         dc_sink_release(aconnector->dc_sink);
2974                 aconnector->dc_sink = NULL;
2975                 amdgpu_dm_update_connector_after_detect(aconnector);
2976                 mutex_unlock(&aconnector->hpd_lock);
2977         }
2978         drm_connector_list_iter_end(&iter);
2979
2980         /* Force mode set in atomic commit */
2981         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2982                 new_crtc_state->active_changed = true;
2983
2984         /*
2985          * atomic_check is expected to create the dc states. We need to release
2986          * them here, since they were duplicated as part of the suspend
2987          * procedure.
2988          */
2989         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2990                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2991                 if (dm_new_crtc_state->stream) {
2992                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2993                         dc_stream_release(dm_new_crtc_state->stream);
2994                         dm_new_crtc_state->stream = NULL;
2995                 }
2996         }
2997
2998         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2999                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
3000                 if (dm_new_plane_state->dc_state) {
3001                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
3002                         dc_plane_state_release(dm_new_plane_state->dc_state);
3003                         dm_new_plane_state->dc_state = NULL;
3004                 }
3005         }
3006
3007         drm_atomic_helper_resume(ddev, dm->cached_state);
3008
3009         dm->cached_state = NULL;
3010
3011         /* Do MST topology probing after resuming the cached state */
3012         drm_connector_list_iter_begin(ddev, &iter);
3013         drm_for_each_connector_iter(connector, &iter) {
                     if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                             continue;

3014                 aconnector = to_amdgpu_dm_connector(connector);
3015                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
3016                     aconnector->mst_root)
3017                         continue;
3018
3019                 ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
3020
3021                 if (ret < 0) {
3022                         dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
3023                                         aconnector->dc_link);
3024                         need_hotplug = true;
3025                 }
3026         }
3027         drm_connector_list_iter_end(&iter);
3028
3029         if (need_hotplug)
3030                 drm_kms_helper_hotplug_event(ddev);
3031
3032         amdgpu_dm_irq_resume_late(adev);
3033
3034         amdgpu_dm_smu_write_watermarks_table(adev);
3035
3036         return 0;
3037 }
3038
3039 /**
3040  * DOC: DM Lifecycle
3041  *
3042  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
3043  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
3044  * the base driver's device list to be initialized and torn down accordingly.
3045  *
3046  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
3047  */
3048
3049 static const struct amd_ip_funcs amdgpu_dm_funcs = {
3050         .name = "dm",
3051         .early_init = dm_early_init,
3052         .late_init = dm_late_init,
3053         .sw_init = dm_sw_init,
3054         .sw_fini = dm_sw_fini,
3055         .early_fini = amdgpu_dm_early_fini,
3056         .hw_init = dm_hw_init,
3057         .hw_fini = dm_hw_fini,
3058         .suspend = dm_suspend,
3059         .resume = dm_resume,
3060         .is_idle = dm_is_idle,
3061         .wait_for_idle = dm_wait_for_idle,
3062         .check_soft_reset = dm_check_soft_reset,
3063         .soft_reset = dm_soft_reset,
3064         .set_clockgating_state = dm_set_clockgating_state,
3065         .set_powergating_state = dm_set_powergating_state,
3066 };
3067
3068 const struct amdgpu_ip_block_version dm_ip_block = {
3069         .type = AMD_IP_BLOCK_TYPE_DCE,
3070         .major = 1,
3071         .minor = 0,
3072         .rev = 0,
3073         .funcs = &amdgpu_dm_funcs,
3074 };
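
/*
 * For reference, a minimal sketch of how the base driver consumes the table
 * above (the wrapper name is hypothetical; the real call site lives in the
 * amdgpu base driver, not in this file):
 */
static int example_add_dm_ip_block(struct amdgpu_device *adev)
{
        /* Hand the DM hooks to the base driver's IP block list. */
        return amdgpu_device_ip_block_add(adev, &dm_ip_block);
}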
3075
3076
3077 /**
3078  * DOC: atomic
3079  *
3080  * *WIP*
3081  */
3082
3083 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
3084         .fb_create = amdgpu_display_user_framebuffer_create,
3085         .get_format_info = amdgpu_dm_plane_get_format_info,
3086         .atomic_check = amdgpu_dm_atomic_check,
3087         .atomic_commit = drm_atomic_helper_commit,
3088 };
3089
3090 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
3091         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
3092         .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
3093 };
3094
3095 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
3096 {
3097         struct amdgpu_dm_backlight_caps *caps;
3098         struct drm_connector *conn_base;
3099         struct amdgpu_device *adev;
3100         struct drm_luminance_range_info *luminance_range;
3101
3102         if (aconnector->bl_idx == -1 ||
3103             aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
3104                 return;
3105
3106         conn_base = &aconnector->base;
3107         adev = drm_to_adev(conn_base->dev);
3108
3109         caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3110         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
3111         caps->aux_support = false;
3112
3113         if (caps->ext_caps->bits.oled == 1
3114             /*
3115              * ||
3116              * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
3117              * caps->ext_caps->bits.hdr_aux_backlight_control == 1
3118              */)
3119                 caps->aux_support = true;
3120
3121         if (amdgpu_backlight == 0)
3122                 caps->aux_support = false;
3123         else if (amdgpu_backlight == 1)
3124                 caps->aux_support = true;
3125
3126         luminance_range = &conn_base->display_info.luminance_range;
3127
3128         if (luminance_range->max_luminance) {
3129                 caps->aux_min_input_signal = luminance_range->min_luminance;
3130                 caps->aux_max_input_signal = luminance_range->max_luminance;
3131         } else {
3132                 caps->aux_min_input_signal = 0;
3133                 caps->aux_max_input_signal = 512;
3134         }
3135 }
3136
3137 void amdgpu_dm_update_connector_after_detect(
3138                 struct amdgpu_dm_connector *aconnector)
3139 {
3140         struct drm_connector *connector = &aconnector->base;
3141         struct drm_device *dev = connector->dev;
3142         struct dc_sink *sink;
3143
3144         /* MST handled by drm_mst framework */
3145         if (aconnector->mst_mgr.mst_state)
3146                 return;
3147
3148         sink = aconnector->dc_link->local_sink;
3149         if (sink)
3150                 dc_sink_retain(sink);
3151
3152         /*
3153          * An EDID-managed connector gets its first update only in the mode_valid
3154          * hook; the connector sink is then set to either the fake or the physical
3155          * sink, depending on the link status. Skip if already done during boot.
3156          */
3157         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3158                         && aconnector->dc_em_sink) {
3159
3160                 /*
3161                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
3162                  * fake a stream, because connector->sink is set to NULL on resume.
3163                  */
3164                 mutex_lock(&dev->mode_config.mutex);
3165
3166                 if (sink) {
3167                         if (aconnector->dc_sink) {
3168                                 amdgpu_dm_update_freesync_caps(connector, NULL);
3169                                 /*
3170                          * The retain and release below bump the sink's refcount:
3171                          * the link no longer points to it after disconnect, so
3172                          * the next CRTC-to-connector reshuffle by the UMD would
3173                          * otherwise trigger an unwanted dc_sink release.
3174                                  */
3175                                 dc_sink_release(aconnector->dc_sink);
3176                         }
3177                         aconnector->dc_sink = sink;
3178                         dc_sink_retain(aconnector->dc_sink);
3179                         amdgpu_dm_update_freesync_caps(connector,
3180                                         aconnector->edid);
3181                 } else {
3182                         amdgpu_dm_update_freesync_caps(connector, NULL);
3183                         if (!aconnector->dc_sink) {
3184                                 aconnector->dc_sink = aconnector->dc_em_sink;
3185                                 dc_sink_retain(aconnector->dc_sink);
3186                         }
3187                 }
3188
3189                 mutex_unlock(&dev->mode_config.mutex);
3190
3191                 if (sink)
3192                         dc_sink_release(sink);
3193                 return;
3194         }
3195
3196         /*
3197          * TODO: temporary guard while looking for a proper fix.
3198          * If this sink is an MST sink, we should not do anything.
3199          */
3200         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3201                 dc_sink_release(sink);
3202                 return;
3203         }
3204
3205         if (aconnector->dc_sink == sink) {
3206                 /*
3207                  * We got a DP short pulse (link loss, DP CTS, etc.);
3208                  * there is nothing to do.
3209                  */
3210                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3211                                 aconnector->connector_id);
3212                 if (sink)
3213                         dc_sink_release(sink);
3214                 return;
3215         }
3216
3217         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3218                 aconnector->connector_id, aconnector->dc_sink, sink);
3219
3220         mutex_lock(&dev->mode_config.mutex);
3221
3222         /*
3223          * 1. Update status of the drm connector
3224          * 2. Send an event and let userspace tell us what to do
3225          */
3226         if (sink) {
3227                 /*
3228                  * TODO: check if we still need the S3 mode update workaround.
3229                  * If yes, put it here.
3230                  */
3231                 if (aconnector->dc_sink) {
3232                         amdgpu_dm_update_freesync_caps(connector, NULL);
3233                         dc_sink_release(aconnector->dc_sink);
3234                 }
3235
3236                 aconnector->dc_sink = sink;
3237                 dc_sink_retain(aconnector->dc_sink);
3238                 if (sink->dc_edid.length == 0) {
3239                         aconnector->edid = NULL;
3240                         if (aconnector->dc_link->aux_mode) {
3241                                 drm_dp_cec_unset_edid(
3242                                         &aconnector->dm_dp_aux.aux);
3243                         }
3244                 } else {
3245                         aconnector->edid =
3246                                 (struct edid *)sink->dc_edid.raw_edid;
3247
3248                         if (aconnector->dc_link->aux_mode)
3249                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3250                                                     aconnector->edid);
3251                 }
3252
3253                 if (!aconnector->timing_requested) {
3254                         aconnector->timing_requested =
3255                                 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3256                         if (!aconnector->timing_requested)
3257                                 drm_err(dev,
3258                                         "failed to create aconnector->timing_requested\n");
3259                 }
3260
3261                 drm_connector_update_edid_property(connector, aconnector->edid);
3262                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3263                 update_connector_ext_caps(aconnector);
3264         } else {
3265                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3266                 amdgpu_dm_update_freesync_caps(connector, NULL);
3267                 drm_connector_update_edid_property(connector, NULL);
3268                 aconnector->num_modes = 0;
3269                 dc_sink_release(aconnector->dc_sink);
3270                 aconnector->dc_sink = NULL;
3271                 aconnector->edid = NULL;
3272                 kfree(aconnector->timing_requested);
3273                 aconnector->timing_requested = NULL;
3274                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3275                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3276                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3277         }
3278
3279         mutex_unlock(&dev->mode_config.mutex);
3280
3281         update_subconnector_property(aconnector);
3282
3283         if (sink)
3284                 dc_sink_release(sink);
3285 }
3286
3287 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3288 {
3289         struct drm_connector *connector = &aconnector->base;
3290         struct drm_device *dev = connector->dev;
3291         enum dc_connection_type new_connection_type = dc_connection_none;
3292         struct amdgpu_device *adev = drm_to_adev(dev);
3293         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3294         bool ret = false;
3295
3296         if (adev->dm.disable_hpd_irq)
3297                 return;
3298
3299         /*
3300          * In case of failure or MST, there is no need to update the connector
3301          * status or notify the OS, since (in the MST case) MST does this in its own context.
3302          */
3303         mutex_lock(&aconnector->hpd_lock);
3304
3305         if (adev->dm.hdcp_workqueue) {
3306                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3307                 dm_con_state->update_hdcp = true;
3308         }
3309         if (aconnector->fake_enable)
3310                 aconnector->fake_enable = false;
3311
3312         aconnector->timing_changed = false;
3313
3314         if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3315                 DRM_ERROR("KMS: Failed to detect connector\n");
3316
3317         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3318                 emulated_link_detect(aconnector->dc_link);
3319
3320                 drm_modeset_lock_all(dev);
3321                 dm_restore_drm_connector_state(dev, connector);
3322                 drm_modeset_unlock_all(dev);
3323
3324                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3325                         drm_kms_helper_connector_hotplug_event(connector);
3326         } else {
3327                 mutex_lock(&adev->dm.dc_lock);
3328                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3329                 mutex_unlock(&adev->dm.dc_lock);
3330                 if (ret) {
3331                         amdgpu_dm_update_connector_after_detect(aconnector);
3332
3333                         drm_modeset_lock_all(dev);
3334                         dm_restore_drm_connector_state(dev, connector);
3335                         drm_modeset_unlock_all(dev);
3336
3337                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3338                                 drm_kms_helper_connector_hotplug_event(connector);
3339                 }
3340         }
3341         mutex_unlock(&aconnector->hpd_lock);
3342
3343 }
3344
3345 static void handle_hpd_irq(void *param)
3346 {
3347         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3348
3349         handle_hpd_irq_helper(aconnector);
3350
3351 }
3352
3353 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3354                                                         union hpd_irq_data hpd_irq_data)
3355 {
3356         struct hpd_rx_irq_offload_work *offload_work =
3357                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3358
3359         if (!offload_work) {
3360                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3361                 return;
3362         }
3363
3364         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3365         offload_work->data = hpd_irq_data;
3366         offload_work->offload_wq = offload_wq;
3367
3368         queue_work(offload_wq->wq, &offload_work->work);
3369         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3370 }
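
/*
 * Sketch of the consuming side (hypothetical, simplified): the real handler,
 * dm_handle_hpd_rx_offload_work, is defined earlier in this file. Whatever
 * runs here must free the work item it was handed, since the producer above
 * allocates one per event.
 */
static void example_hpd_rx_offload_handler(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work =
                container_of(work, struct hpd_rx_irq_offload_work, work);

        /* ... act on offload_work->data and offload_work->offload_wq ... */

        kfree(offload_work);
}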
3371
3372 static void handle_hpd_rx_irq(void *param)
3373 {
3374         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3375         struct drm_connector *connector = &aconnector->base;
3376         struct drm_device *dev = connector->dev;
3377         struct dc_link *dc_link = aconnector->dc_link;
3378         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3379         bool result = false;
3380         enum dc_connection_type new_connection_type = dc_connection_none;
3381         struct amdgpu_device *adev = drm_to_adev(dev);
3382         union hpd_irq_data hpd_irq_data;
3383         bool link_loss = false;
3384         bool has_left_work = false;
3385         int idx = dc_link->link_index;
3386         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3387
3388         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3389
3390         if (adev->dm.disable_hpd_irq)
3391                 return;
3392
3393         /*
3394          * TODO: This mutex temporarily protects the HPD interrupt from GPIO
3395          * conflicts; once an i2c helper is implemented, this mutex should
3396          * be retired.
3397          */
3398         mutex_lock(&aconnector->hpd_lock);
3399
3400         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3401                                                 &link_loss, true, &has_left_work);
3402
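        /* If DC handled everything inline, there is nothing left to offload. */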
3403         if (!has_left_work)
3404                 goto out;
3405
3406         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3407                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3408                 goto out;
3409         }
3410
3411         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3412                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3413                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3414                         bool skip = false;
3415
3416                         /*
3417                          * DOWN_REP_MSG_RDY is also handled by polling method
3418                          * mgr->cbs->poll_hpd_irq()
3419                          */
3420                         spin_lock(&offload_wq->offload_lock);
3421                         skip = offload_wq->is_handling_mst_msg_rdy_event;
3422
3423                         if (!skip)
3424                                 offload_wq->is_handling_mst_msg_rdy_event = true;
3425
3426                         spin_unlock(&offload_wq->offload_lock);
3427
3428                         if (!skip)
3429                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3430
3431                         goto out;
3432                 }
3433
3434                 if (link_loss) {
3435                         bool skip = false;
3436
3437                         spin_lock(&offload_wq->offload_lock);
3438                         skip = offload_wq->is_handling_link_loss;
3439
3440                         if (!skip)
3441                                 offload_wq->is_handling_link_loss = true;
3442
3443                         spin_unlock(&offload_wq->offload_lock);
3444
3445                         if (!skip)
3446                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3447
3448                         goto out;
3449                 }
3450         }
3451
3452 out:
3453         if (result && !is_mst_root_connector) {
3454                 /* Downstream Port status changed. */
3455                 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
3456                         DRM_ERROR("KMS: Failed to detect connector\n");
3457
3458                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3459                         emulated_link_detect(dc_link);
3460
3461                         if (aconnector->fake_enable)
3462                                 aconnector->fake_enable = false;
3463
3464                         amdgpu_dm_update_connector_after_detect(aconnector);
3465
3466
3467                         drm_modeset_lock_all(dev);
3468                         dm_restore_drm_connector_state(dev, connector);
3469                         drm_modeset_unlock_all(dev);
3470
3471                         drm_kms_helper_connector_hotplug_event(connector);
3472                 } else {
3473                         bool ret = false;
3474
3475                         mutex_lock(&adev->dm.dc_lock);
3476                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3477                         mutex_unlock(&adev->dm.dc_lock);
3478
3479                         if (ret) {
3480                                 if (aconnector->fake_enable)
3481                                         aconnector->fake_enable = false;
3482
3483                                 amdgpu_dm_update_connector_after_detect(aconnector);
3484
3485                                 drm_modeset_lock_all(dev);
3486                                 dm_restore_drm_connector_state(dev, connector);
3487                                 drm_modeset_unlock_all(dev);
3488
3489                                 drm_kms_helper_connector_hotplug_event(connector);
3490                         }
3491                 }
3492         }
3493         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3494                 if (adev->dm.hdcp_workqueue)
3495                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3496         }
3497
3498         if (dc_link->type != dc_connection_mst_branch)
3499                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3500
3501         mutex_unlock(&aconnector->hpd_lock);
3502 }
3503
3504 static void register_hpd_handlers(struct amdgpu_device *adev)
3505 {
3506         struct drm_device *dev = adev_to_drm(adev);
3507         struct drm_connector *connector;
3508         struct amdgpu_dm_connector *aconnector;
3509         const struct dc_link *dc_link;
3510         struct dc_interrupt_params int_params = {0};
3511
3512         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3513         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3514
3515         list_for_each_entry(connector,
3516                         &dev->mode_config.connector_list, head) {
3517
3518                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
3519                         continue;
3520
3521                 aconnector = to_amdgpu_dm_connector(connector);
3522                 dc_link = aconnector->dc_link;
3523
3524                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3525                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3526                         int_params.irq_source = dc_link->irq_source_hpd;
3527
3528                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3529                                         handle_hpd_irq,
3530                                         (void *) aconnector);
3531                 }
3532
3533                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3534
3535                         /* Also register for DP short pulse (hpd_rx). */
3536                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3537                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3538
3539                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3540                                         handle_hpd_rx_irq,
3541                                         (void *) aconnector);
3542                 }
3543
3544                 if (adev->dm.hpd_rx_offload_wq)
3545                         adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3546                                 aconnector;
3547         }
3548 }
3549
3550 #if defined(CONFIG_DRM_AMD_DC_SI)
3551 /* Register IRQ sources and initialize IRQ callbacks */
3552 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3553 {
3554         struct dc *dc = adev->dm.dc;
3555         struct common_irq_params *c_irq_params;
3556         struct dc_interrupt_params int_params = {0};
3557         int r;
3558         int i;
3559         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3560
3561         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3562         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3563
3564         /*
3565          * Actions of amdgpu_irq_add_id():
3566          * 1. Register a set() function with base driver.
3567          *    Base driver will call set() function to enable/disable an
3568          *    interrupt in DC hardware.
3569          * 2. Register amdgpu_dm_irq_handler().
3570          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3571          *    coming from DC hardware.
3572          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3573          *    for acknowledging and handling.
3574          */
3575
3576         /* Use VBLANK interrupt */
3577         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3578                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3579                 if (r) {
3580                         DRM_ERROR("Failed to add crtc irq id!\n");
3581                         return r;
3582                 }
3583
3584                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3585                 int_params.irq_source =
3586                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3587
3588                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3589
3590                 c_irq_params->adev = adev;
3591                 c_irq_params->irq_src = int_params.irq_source;
3592
3593                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3594                                 dm_crtc_high_irq, c_irq_params);
3595         }
3596
3597         /* Use GRPH_PFLIP interrupt */
3598         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3599                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3600                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3601                 if (r) {
3602                         DRM_ERROR("Failed to add page flip irq id!\n");
3603                         return r;
3604                 }
3605
3606                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607                 int_params.irq_source =
3608                         dc_interrupt_to_irq_source(dc, i, 0);
3609
3610                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3611
3612                 c_irq_params->adev = adev;
3613                 c_irq_params->irq_src = int_params.irq_source;
3614
3615                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3616                                 dm_pflip_high_irq, c_irq_params);
3617
3618         }
3619
3620         /* HPD */
3621         r = amdgpu_irq_add_id(adev, client_id,
3622                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3623         if (r) {
3624                 DRM_ERROR("Failed to add hpd irq id!\n");
3625                 return r;
3626         }
3627
3628         register_hpd_handlers(adev);
3629
3630         return 0;
3631 }
3632 #endif
3633
3634 /* Register IRQ sources and initialize IRQ callbacks */
3635 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3636 {
3637         struct dc *dc = adev->dm.dc;
3638         struct common_irq_params *c_irq_params;
3639         struct dc_interrupt_params int_params = {0};
3640         int r;
3641         int i;
3642         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3643
3644         if (adev->family >= AMDGPU_FAMILY_AI)
3645                 client_id = SOC15_IH_CLIENTID_DCE;
3646
3647         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3648         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3649
3650         /*
3651          * Actions of amdgpu_irq_add_id():
3652          * 1. Register a set() function with base driver.
3653          *    Base driver will call set() function to enable/disable an
3654          *    interrupt in DC hardware.
3655          * 2. Register amdgpu_dm_irq_handler().
3656          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3657          *    coming from DC hardware.
3658          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3659          *    for acknowledging and handling.
3660          */
3661
3662         /* Use VBLANK interrupt */
3663         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3664                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3665                 if (r) {
3666                         DRM_ERROR("Failed to add crtc irq id!\n");
3667                         return r;
3668                 }
3669
3670                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3671                 int_params.irq_source =
3672                         dc_interrupt_to_irq_source(dc, i, 0);
3673
3674                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3675
3676                 c_irq_params->adev = adev;
3677                 c_irq_params->irq_src = int_params.irq_source;
3678
3679                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3680                                 dm_crtc_high_irq, c_irq_params);
3681         }
3682
3683         /* Use VUPDATE interrupt */
3684         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3685                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3686                 if (r) {
3687                         DRM_ERROR("Failed to add vupdate irq id!\n");
3688                         return r;
3689                 }
3690
3691                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3692                 int_params.irq_source =
3693                         dc_interrupt_to_irq_source(dc, i, 0);
3694
3695                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3696
3697                 c_irq_params->adev = adev;
3698                 c_irq_params->irq_src = int_params.irq_source;
3699
3700                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3701                                 dm_vupdate_high_irq, c_irq_params);
3702         }
3703
3704         /* Use GRPH_PFLIP interrupt */
3705         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3706                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3707                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3708                 if (r) {
3709                         DRM_ERROR("Failed to add page flip irq id!\n");
3710                         return r;
3711                 }
3712
3713                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3714                 int_params.irq_source =
3715                         dc_interrupt_to_irq_source(dc, i, 0);
3716
3717                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3718
3719                 c_irq_params->adev = adev;
3720                 c_irq_params->irq_src = int_params.irq_source;
3721
3722                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3723                                 dm_pflip_high_irq, c_irq_params);
3724
3725         }
3726
3727         /* HPD */
3728         r = amdgpu_irq_add_id(adev, client_id,
3729                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3730         if (r) {
3731                 DRM_ERROR("Failed to add hpd irq id!\n");
3732                 return r;
3733         }
3734
3735         register_hpd_handlers(adev);
3736
3737         return 0;
3738 }
3739
3740 /* Register IRQ sources and initialize IRQ callbacks */
3741 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3742 {
3743         struct dc *dc = adev->dm.dc;
3744         struct common_irq_params *c_irq_params;
3745         struct dc_interrupt_params int_params = {0};
3746         int r;
3747         int i;
3748 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3749         static const unsigned int vrtl_int_srcid[] = {
3750                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3751                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3752                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3753                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3754                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3755                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3756         };
3757 #endif
3758
3759         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3760         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3761
3762         /*
3763          * Actions of amdgpu_irq_add_id():
3764          * 1. Register a set() function with base driver.
3765          *    Base driver will call set() function to enable/disable an
3766          *    interrupt in DC hardware.
3767          * 2. Register amdgpu_dm_irq_handler().
3768          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3769          *    coming from DC hardware.
3770          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3771          *    for acknowledging and handling.
3772          */
3773
3774         /* Use VSTARTUP interrupt */
3775         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3776                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3777                         i++) {
3778                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3779
3780                 if (r) {
3781                         DRM_ERROR("Failed to add crtc irq id!\n");
3782                         return r;
3783                 }
3784
3785                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3786                 int_params.irq_source =
3787                         dc_interrupt_to_irq_source(dc, i, 0);
3788
3789                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3790
3791                 c_irq_params->adev = adev;
3792                 c_irq_params->irq_src = int_params.irq_source;
3793
3794                 amdgpu_dm_irq_register_interrupt(
3795                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3796         }
3797
3798         /* Use otg vertical line interrupt */
3799 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3800         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3801                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3802                                 vrtl_int_srcid[i], &adev->vline0_irq);
3803
3804                 if (r) {
3805                         DRM_ERROR("Failed to add vline0 irq id!\n");
3806                         return r;
3807                 }
3808
3809                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3810                 int_params.irq_source =
3811                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3812
3813                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3814                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3815                         break;
3816                 }
3817
3818                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3819                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3820
3821                 c_irq_params->adev = adev;
3822                 c_irq_params->irq_src = int_params.irq_source;
3823
3824                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3825                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3826         }
3827 #endif
3828
3829         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3830          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3831          * to trigger at the end of each vblank, regardless of the state of
3832          * the lock, matching DCE behaviour.
3833          */
3834         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3835              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3836              i++) {
3837                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3838
3839                 if (r) {
3840                         DRM_ERROR("Failed to add vupdate irq id!\n");
3841                         return r;
3842                 }
3843
3844                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3845                 int_params.irq_source =
3846                         dc_interrupt_to_irq_source(dc, i, 0);
3847
3848                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3849
3850                 c_irq_params->adev = adev;
3851                 c_irq_params->irq_src = int_params.irq_source;
3852
3853                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3854                                 dm_vupdate_high_irq, c_irq_params);
3855         }
3856
3857         /* Use GRPH_PFLIP interrupt */
3858         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3859                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3860                         i++) {
3861                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3862                 if (r) {
3863                         DRM_ERROR("Failed to add page flip irq id!\n");
3864                         return r;
3865                 }
3866
3867                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3868                 int_params.irq_source =
3869                         dc_interrupt_to_irq_source(dc, i, 0);
3870
3871                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3872
3873                 c_irq_params->adev = adev;
3874                 c_irq_params->irq_src = int_params.irq_source;
3875
3876                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3877                                 dm_pflip_high_irq, c_irq_params);
3878
3879         }
3880
3881         /* HPD */
3882         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3883                         &adev->hpd_irq);
3884         if (r) {
3885                 DRM_ERROR("Failed to add hpd irq id!\n");
3886                 return r;
3887         }
3888
3889         register_hpd_handlers(adev);
3890
3891         return 0;
3892 }
3893 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3894 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3895 {
3896         struct dc *dc = adev->dm.dc;
3897         struct common_irq_params *c_irq_params;
3898         struct dc_interrupt_params int_params = {0};
3899         int r, i;
3900
3901         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3902         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3903
3904         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3905                         &adev->dmub_outbox_irq);
3906         if (r) {
3907                 DRM_ERROR("Failed to add outbox irq id!\n");
3908                 return r;
3909         }
3910
3911         if (dc->ctx->dmub_srv) {
3912                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3913                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3914                 int_params.irq_source =
3915                         dc_interrupt_to_irq_source(dc, i, 0);
3916
3917                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3918
3919                 c_irq_params->adev = adev;
3920                 c_irq_params->irq_src = int_params.irq_source;
3921
3922                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3923                                 dm_dmub_outbox1_low_irq, c_irq_params);
3924         }
3925
3926         return 0;
3927 }
3928
3929 /*
3930  * Acquires the lock for the atomic state object and returns
3931  * the new atomic state.
3932  *
3933  * This should only be called during atomic check.
3934  */
3935 int dm_atomic_get_state(struct drm_atomic_state *state,
3936                         struct dm_atomic_state **dm_state)
3937 {
3938         struct drm_device *dev = state->dev;
3939         struct amdgpu_device *adev = drm_to_adev(dev);
3940         struct amdgpu_display_manager *dm = &adev->dm;
3941         struct drm_private_state *priv_state;
3942
3943         if (*dm_state)
3944                 return 0;
3945
3946         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3947         if (IS_ERR(priv_state))
3948                 return PTR_ERR(priv_state);
3949
3950         *dm_state = to_dm_atomic_state(priv_state);
3951
3952         return 0;
3953 }
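
/*
 * Typical usage, as an illustrative sketch (the function name is
 * hypothetical): the caller keeps a NULL-initialized pointer and lets the
 * helper fill it in at most once per atomic check.
 */
static int example_use_dm_state(struct drm_atomic_state *state)
{
        struct dm_atomic_state *dm_state = NULL;
        int ret;

        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
                return ret;

        /* dm_state->context can now be inspected or modified for this commit. */
        return 0;
}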
3954
3955 static struct dm_atomic_state *
3956 dm_atomic_get_new_state(struct drm_atomic_state *state)
3957 {
3958         struct drm_device *dev = state->dev;
3959         struct amdgpu_device *adev = drm_to_adev(dev);
3960         struct amdgpu_display_manager *dm = &adev->dm;
3961         struct drm_private_obj *obj;
3962         struct drm_private_state *new_obj_state;
3963         int i;
3964
3965         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3966                 if (obj->funcs == dm->atomic_obj.funcs)
3967                         return to_dm_atomic_state(new_obj_state);
3968         }
3969
3970         return NULL;
3971 }
3972
3973 static struct drm_private_state *
3974 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3975 {
3976         struct dm_atomic_state *old_state, *new_state;
3977
3978         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3979         if (!new_state)
3980                 return NULL;
3981
3982         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3983
3984         old_state = to_dm_atomic_state(obj->state);
3985
3986         if (old_state && old_state->context)
3987                 new_state->context = dc_copy_state(old_state->context);
3988
3989         if (!new_state->context) {
3990                 kfree(new_state);
3991                 return NULL;
3992         }
3993
3994         return &new_state->base;
3995 }
3996
3997 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3998                                     struct drm_private_state *state)
3999 {
4000         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4001
4002         if (dm_state && dm_state->context)
4003                 dc_release_state(dm_state->context);
4004
4005         kfree(dm_state);
4006 }
4007
4008 static struct drm_private_state_funcs dm_atomic_state_funcs = {
4009         .atomic_duplicate_state = dm_atomic_duplicate_state,
4010         .atomic_destroy_state = dm_atomic_destroy_state,
4011 };
4012
4013 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
4014 {
4015         struct dm_atomic_state *state;
4016         int r;
4017
4018         adev->mode_info.mode_config_initialized = true;
4019
4020         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
4021         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4022
4023         adev_to_drm(adev)->mode_config.max_width = 16384;
4024         adev_to_drm(adev)->mode_config.max_height = 16384;
4025
4026         adev_to_drm(adev)->mode_config.preferred_depth = 24;
4027         if (adev->asic_type == CHIP_HAWAII)
4028                 /* disable prefer shadow for now due to hibernation issues */
4029                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
4030         else
4031                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
4032         /* indicates support for immediate flip */
4033         adev_to_drm(adev)->mode_config.async_page_flip = true;
4034
4035         state = kzalloc(sizeof(*state), GFP_KERNEL);
4036         if (!state)
4037                 return -ENOMEM;
4038
4039         state->context = dc_create_state(adev->dm.dc);
4040         if (!state->context) {
4041                 kfree(state);
4042                 return -ENOMEM;
4043         }
4044
4045         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
4046
4047         drm_atomic_private_obj_init(adev_to_drm(adev),
4048                                     &adev->dm.atomic_obj,
4049                                     &state->base,
4050                                     &dm_atomic_state_funcs);
4051
4052         r = amdgpu_display_modeset_create_props(adev);
4053         if (r) {
4054                 dc_release_state(state->context);
4055                 kfree(state);
4056                 return r;
4057         }
4058
4059         r = amdgpu_dm_audio_init(adev);
4060         if (r) {
4061                 dc_release_state(state->context);
4062                 kfree(state);
4063                 return r;
4064         }
4065
4066         return 0;
4067 }
4068
4069 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4070 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
4071 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
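
/*
 * Fallback range used when ACPI does not report valid backlight caps;
 * the non-zero minimum presumably keeps the lowest user brightness
 * step from mapping to a fully dark panel.
 */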
4072
4073 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4074                                             int bl_idx)
4075 {
4076 #if defined(CONFIG_ACPI)
4077         struct amdgpu_dm_backlight_caps caps;
4078
4079         memset(&caps, 0, sizeof(caps));
4080
4081         if (dm->backlight_caps[bl_idx].caps_valid)
4082                 return;
4083
4084         amdgpu_acpi_get_backlight_caps(&caps);
4085         if (caps.caps_valid) {
4086                 dm->backlight_caps[bl_idx].caps_valid = true;
4087                 if (caps.aux_support)
4088                         return;
4089                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
4090                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
4091         } else {
4092                 dm->backlight_caps[bl_idx].min_input_signal =
4093                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4094                 dm->backlight_caps[bl_idx].max_input_signal =
4095                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4096         }
4097 #else
4098         if (dm->backlight_caps[bl_idx].aux_support)
4099                 return;
4100
4101         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4102         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4103 #endif
4104 }
4105
4106 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4107                                 unsigned int *min, unsigned int *max)
4108 {
4109         if (!caps)
4110                 return 0;
4111
4112         if (caps->aux_support) {
4113                 // Firmware limits are in nits, DC API wants millinits.
4114                 *max = 1000 * caps->aux_max_input_signal;
4115                 *min = 1000 * caps->aux_min_input_signal;
4116         } else {
4117                 // Firmware limits are 8-bit, PWM control is 16-bit.
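                // 0x101 == 257 and n * 0x101 == (n << 8) | n, so the
                // 8-bit firmware limit expands to the full 16-bit PWM
                // range (0xFF * 0x101 == 0xFFFF).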
4118                 *max = 0x101 * caps->max_input_signal;
4119                 *min = 0x101 * caps->min_input_signal;
4120         }
4121         return 1;
4122 }
4123
4124 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4125                                         uint32_t brightness)
4126 {
4127         unsigned int min, max;
4128
4129         if (!get_brightness_range(caps, &min, &max))
4130                 return brightness;
4131
4132         // Rescale 0..255 to min..max
4133         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4134                                        AMDGPU_MAX_BL_LEVEL);
4135 }
4136
4137 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4138                                       uint32_t brightness)
4139 {
4140         unsigned int min, max;
4141
4142         if (!get_brightness_range(caps, &min, &max))
4143                 return brightness;
4144
4145         if (brightness < min)
4146                 return 0;
4147         // Rescale min..max to 0..255
4148         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4149                                  max - min);
4150 }
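
/*
 * Worked example for the two helpers above, assuming the PWM path with
 * the default 12..255 firmware caps: get_brightness_range() yields
 * min = 12 * 0x101 = 3084 and max = 255 * 0x101 = 65535, so user level
 * 0 programs 3084 and user level 255 programs full duty (65535);
 * convert_brightness_to_user() inverts the mapping, clamping anything
 * below min to 0.
 */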
4151
4152 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4153                                          int bl_idx,
4154                                          u32 user_brightness)
4155 {
4156         struct amdgpu_dm_backlight_caps caps;
4157         struct dc_link *link;
4158         u32 brightness;
4159         bool rc;
4160
4161         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4162         caps = dm->backlight_caps[bl_idx];
4163
4164         dm->brightness[bl_idx] = user_brightness;
4165         /* update scratch register */
4166         if (bl_idx == 0)
4167                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4168         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4169         link = (struct dc_link *)dm->backlight_link[bl_idx];
4170
4171         /* Change brightness based on AUX property */
4172         if (caps.aux_support) {
4173                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4174                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4175                 if (!rc)
4176                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4177         } else {
4178                 rc = dc_link_set_backlight_level(link, brightness, 0);
4179                 if (!rc)
4180                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4181         }
4182
4183         if (rc)
4184                 dm->actual_brightness[bl_idx] = user_brightness;
4185 }
4186
4187 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4188 {
4189         struct amdgpu_display_manager *dm = bl_get_data(bd);
4190         int i;
4191
4192         for (i = 0; i < dm->num_of_edps; i++) {
4193                 if (bd == dm->backlight_dev[i])
4194                         break;
4195         }
4196         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4197                 i = 0;
4198         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4199
4200         return 0;
4201 }
4202
4203 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4204                                          int bl_idx)
4205 {
4206         int ret;
4207         struct amdgpu_dm_backlight_caps caps;
4208         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4209
4210         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4211         caps = dm->backlight_caps[bl_idx];
4212
4213         if (caps.aux_support) {
4214                 u32 avg, peak;
4215                 bool rc;
4216
4217                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4218                 if (!rc)
4219                         return dm->brightness[bl_idx];
4220                 return convert_brightness_to_user(&caps, avg);
4221         }
4222
4223         ret = dc_link_get_backlight_level(link);
4224
4225         if (ret == DC_ERROR_UNEXPECTED)
4226                 return dm->brightness[bl_idx];
4227
4228         return convert_brightness_to_user(&caps, ret);
4229 }
4230
4231 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4232 {
4233         struct amdgpu_display_manager *dm = bl_get_data(bd);
4234         int i;
4235
4236         for (i = 0; i < dm->num_of_edps; i++) {
4237                 if (bd == dm->backlight_dev[i])
4238                         break;
4239         }
4240         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4241                 i = 0;
4242         return amdgpu_dm_backlight_get_level(dm, i);
4243 }
4244
4245 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4246         .options = BL_CORE_SUSPENDRESUME,
4247         .get_brightness = amdgpu_dm_backlight_get_brightness,
4248         .update_status  = amdgpu_dm_backlight_update_status,
4249 };
4250
4251 static void
4252 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
4253 {
4254         struct drm_device *drm = aconnector->base.dev;
4255         struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4256         struct backlight_properties props = { 0 };
4257         char bl_name[16];
4258
4259         if (aconnector->bl_idx == -1)
4260                 return;
4261
4262         if (!acpi_video_backlight_use_native()) {
4263                 drm_info(drm, "Skipping amdgpu DM backlight registration\n");
4264                 /* Try registering an ACPI video backlight device instead. */
4265                 acpi_video_register_backlight();
4266                 return;
4267         }
4268
4269         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4270         props.brightness = AMDGPU_MAX_BL_LEVEL;
4271         props.type = BACKLIGHT_RAW;
4272
4273         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4274                  drm->primary->index + aconnector->bl_idx);
4275
4276         dm->backlight_dev[aconnector->bl_idx] =
4277                 backlight_device_register(bl_name, aconnector->base.kdev, dm,
4278                                           &amdgpu_dm_backlight_ops, &props);
4279
4280         if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
4281                 DRM_ERROR("DM: Backlight registration failed!\n");
4282                 dm->backlight_dev[aconnector->bl_idx] = NULL;
4283         } else
4284                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4285 }
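
/*
 * The registered device appears as /sys/class/backlight/<bl_name>
 * (e.g. amdgpu_bl0); userspace writes of 0..AMDGPU_MAX_BL_LEVEL to its
 * "brightness" attribute land in amdgpu_dm_backlight_update_status().
 */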
4286
4287 static int initialize_plane(struct amdgpu_display_manager *dm,
4288                             struct amdgpu_mode_info *mode_info, int plane_id,
4289                             enum drm_plane_type plane_type,
4290                             const struct dc_plane_cap *plane_cap)
4291 {
4292         struct drm_plane *plane;
4293         unsigned long possible_crtcs;
4294         int ret = 0;
4295
4296         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4297         if (!plane) {
4298                 DRM_ERROR("KMS: Failed to allocate plane\n");
4299                 return -ENOMEM;
4300         }
4301         plane->type = plane_type;
4302
4303         /*
4304          * HACK: IGT tests expect that the primary plane for a CRTC
4305          * can only have one possible CRTC. Only expose support for
4306          * any CRTC on planes that are not going to be used as a
4307          * primary plane for a CRTC - like overlay or underlay planes.
4308          */
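        /*
         * e.g. plane_id 1 gets possible_crtcs = 0x2 (CRTC 1 only),
         * while overlay planes (plane_id >= max_streams) get 0xff
         * below, i.e. any of the first 8 CRTCs.
         */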
4309         possible_crtcs = 1 << plane_id;
4310         if (plane_id >= dm->dc->caps.max_streams)
4311                 possible_crtcs = 0xff;
4312
4313         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4314
4315         if (ret) {
4316                 DRM_ERROR("KMS: Failed to initialize plane\n");
4317                 kfree(plane);
4318                 return ret;
4319         }
4320
4321         if (mode_info)
4322                 mode_info->planes[plane_id] = plane;
4323
4324         return ret;
4325 }
4326
4327
4328 static void setup_backlight_device(struct amdgpu_display_manager *dm,
4329                                    struct amdgpu_dm_connector *aconnector)
4330 {
4331         struct dc_link *link = aconnector->dc_link;
4332         int bl_idx = dm->num_of_edps;
4333
4334         if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4335             link->type == dc_connection_none)
4336                 return;
4337
4338         if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
4339                 drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
4340                 return;
4341         }
4342
4343         aconnector->bl_idx = bl_idx;
4344
4345         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4346         dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
4347         dm->backlight_link[bl_idx] = link;
4348         dm->num_of_edps++;
4349
4350         update_connector_ext_caps(aconnector);
4351 }
4352
4353 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4354
4355 /*
4356  * In this architecture, the association
4357  * connector -> encoder -> crtc
4358  * is not really required. The CRTC and connector will hold the
4359  * display_index as an abstraction to use with the DAL component.
4360  *
4361  * Returns 0 on success
4362  */
4363 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4364 {
4365         struct amdgpu_display_manager *dm = &adev->dm;
4366         s32 i;
4367         struct amdgpu_dm_connector *aconnector = NULL;
4368         struct amdgpu_encoder *aencoder = NULL;
4369         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4370         u32 link_cnt;
4371         s32 primary_planes;
4372         enum dc_connection_type new_connection_type = dc_connection_none;
4373         const struct dc_plane_cap *plane;
4374         bool psr_feature_enabled = false;
4375         bool replay_feature_enabled = false;
4376         int max_overlay = dm->dc->caps.max_slave_planes;
4377
4378         dm->display_indexes_num = dm->dc->caps.max_streams;
4379         /* Update the actual number of CRTCs in use */
4380         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4381
4382         amdgpu_dm_set_irq_funcs(adev);
4383
4384         link_cnt = dm->dc->caps.max_links;
4385         if (amdgpu_dm_mode_config_init(dm->adev)) {
4386                 DRM_ERROR("DM: Failed to initialize mode config\n");
4387                 return -EINVAL;
4388         }
4389
4390         /* There is one primary plane per CRTC */
4391         primary_planes = dm->dc->caps.max_streams;
4392         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4393
4394         /*
4395          * Initialize primary planes, implicit planes for legacy IOCTLs.
4396          * Order is reversed to match iteration order in atomic check.
4397          */
4398         for (i = (primary_planes - 1); i >= 0; i--) {
4399                 plane = &dm->dc->caps.planes[i];
4400
4401                 if (initialize_plane(dm, mode_info, i,
4402                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4403                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4404                         goto fail;
4405                 }
4406         }
4407
4408         /*
4409          * Initialize overlay planes, index starting after primary planes.
4410          * These planes have a higher DRM index than the primary planes since
4411          * they should be considered as having a higher z-order.
4412          * Order is reversed to match iteration order in atomic check.
4413          *
4414          * Only support DCN for now, and only expose one so we don't encourage
4415          * userspace to use up all the pipes.
4416          */
4417         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4418                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4419
4420                 /* Do not create overlay if MPO disabled */
4421                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4422                         break;
4423
4424                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4425                         continue;
4426
4427                 if (!plane->pixel_format_support.argb8888)
4428                         continue;
4429
4430                 if (max_overlay-- == 0)
4431                         break;
4432
4433                 if (initialize_plane(dm, NULL, primary_planes + i,
4434                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4435                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4436                         goto fail;
4437                 }
4438         }
4439
4440         for (i = 0; i < dm->dc->caps.max_streams; i++)
4441                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4442                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4443                         goto fail;
4444                 }
4445
4446         /* Use Outbox interrupt */
4447         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4448         case IP_VERSION(3, 0, 0):
4449         case IP_VERSION(3, 1, 2):
4450         case IP_VERSION(3, 1, 3):
4451         case IP_VERSION(3, 1, 4):
4452         case IP_VERSION(3, 1, 5):
4453         case IP_VERSION(3, 1, 6):
4454         case IP_VERSION(3, 2, 0):
4455         case IP_VERSION(3, 2, 1):
4456         case IP_VERSION(2, 1, 0):
4457         case IP_VERSION(3, 5, 0):
4458                 if (register_outbox_irq_handlers(dm->adev)) {
4459                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4460                         goto fail;
4461                 }
4462                 break;
4463         default:
4464                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4465                               amdgpu_ip_version(adev, DCE_HWIP, 0));
4466         }
4467
4468         /* Determine whether to enable PSR support by default. */
4469         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4470                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4471                 case IP_VERSION(3, 1, 2):
4472                 case IP_VERSION(3, 1, 3):
4473                 case IP_VERSION(3, 1, 4):
4474                 case IP_VERSION(3, 1, 5):
4475                 case IP_VERSION(3, 1, 6):
4476                 case IP_VERSION(3, 2, 0):
4477                 case IP_VERSION(3, 2, 1):
4478                 case IP_VERSION(3, 5, 0):
4479                         psr_feature_enabled = true;
4480                         break;
4481                 default:
4482                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4483                         break;
4484                 }
4485         }
4486
4487         if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
4488                 switch (adev->ip_versions[DCE_HWIP][0]) {
4489                 case IP_VERSION(3, 1, 4):
4490                 case IP_VERSION(3, 1, 5):
4491                 case IP_VERSION(3, 1, 6):
4492                 case IP_VERSION(3, 2, 0):
4493                 case IP_VERSION(3, 2, 1):
4494                         replay_feature_enabled = true;
4495                         break;
4496                 default:
4497                         replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
4498                         break;
4499                 }
4500         }
4501         /* Loop over all connectors on the board */
4502         for (i = 0; i < link_cnt; i++) {
4503                 struct dc_link *link = NULL;
4504
4505                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4506                         DRM_ERROR(
4507                                 "KMS: Cannot support more than %d display indexes\n",
4508                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4509                         continue;
4510                 }
4511
4512                 link = dc_get_link_at_index(dm->dc, i);
4513
4514                 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
4515                         struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
4516
4517                         if (!wbcon) {
4518                                 DRM_ERROR("KMS: Failed to allocate writeback connector\n");
4519                                 continue;
4520                         }
4521
4522                         if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
4523                                 DRM_ERROR("KMS: Failed to initialize writeback connector\n");
4524                                 kfree(wbcon);
4525                                 continue;
4526                         }
4527
4528                         link->psr_settings.psr_feature_enabled = false;
4529                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
4530
4531                         continue;
4532                 }
4533
4534                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4535                 if (!aconnector)
4536                         goto fail;
4537
4538                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4539                 if (!aencoder)
4540                         goto fail;
4541
4542                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4543                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4544                         goto fail;
4545                 }
4546
4547                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4548                         DRM_ERROR("KMS: Failed to initialize connector\n");
4549                         goto fail;
4550                 }
4551
4552                 if (!dc_link_detect_connection_type(link, &new_connection_type))
4553                         DRM_ERROR("KMS: Failed to detect connector\n");
4554
4555                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4556                         emulated_link_detect(link);
4557                         amdgpu_dm_update_connector_after_detect(aconnector);
4558                 } else {
4559                         bool ret = false;
4560
4561                         mutex_lock(&dm->dc_lock);
4562                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4563                         mutex_unlock(&dm->dc_lock);
4564
4565                         if (ret) {
4566                                 amdgpu_dm_update_connector_after_detect(aconnector);
4567                                 setup_backlight_device(dm, aconnector);
4568
4569                                 /*
4570                                  * Disable PSR if Replay can be enabled
4571                                  */
4572                                 if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
4573                                         psr_feature_enabled = false;
4574
4575                                 if (psr_feature_enabled)
4576                                         amdgpu_dm_set_psr_caps(link);
4577
4578                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4579                                  * PSR is also supported.
4580                                  */
4581                                 if (link->psr_settings.psr_feature_enabled)
4582                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4583                         }
4584                 }
4585                 amdgpu_set_panel_orientation(&aconnector->base);
4586         }
4587
4588         /* Software is initialized. Now we can register interrupt handlers. */
4589         switch (adev->asic_type) {
4590 #if defined(CONFIG_DRM_AMD_DC_SI)
4591         case CHIP_TAHITI:
4592         case CHIP_PITCAIRN:
4593         case CHIP_VERDE:
4594         case CHIP_OLAND:
4595                 if (dce60_register_irq_handlers(dm->adev)) {
4596                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4597                         goto fail;
4598                 }
4599                 break;
4600 #endif
4601         case CHIP_BONAIRE:
4602         case CHIP_HAWAII:
4603         case CHIP_KAVERI:
4604         case CHIP_KABINI:
4605         case CHIP_MULLINS:
4606         case CHIP_TONGA:
4607         case CHIP_FIJI:
4608         case CHIP_CARRIZO:
4609         case CHIP_STONEY:
4610         case CHIP_POLARIS11:
4611         case CHIP_POLARIS10:
4612         case CHIP_POLARIS12:
4613         case CHIP_VEGAM:
4614         case CHIP_VEGA10:
4615         case CHIP_VEGA12:
4616         case CHIP_VEGA20:
4617                 if (dce110_register_irq_handlers(dm->adev)) {
4618                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4619                         goto fail;
4620                 }
4621                 break;
4622         default:
4623                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4624                 case IP_VERSION(1, 0, 0):
4625                 case IP_VERSION(1, 0, 1):
4626                 case IP_VERSION(2, 0, 2):
4627                 case IP_VERSION(2, 0, 3):
4628                 case IP_VERSION(2, 0, 0):
4629                 case IP_VERSION(2, 1, 0):
4630                 case IP_VERSION(3, 0, 0):
4631                 case IP_VERSION(3, 0, 2):
4632                 case IP_VERSION(3, 0, 3):
4633                 case IP_VERSION(3, 0, 1):
4634                 case IP_VERSION(3, 1, 2):
4635                 case IP_VERSION(3, 1, 3):
4636                 case IP_VERSION(3, 1, 4):
4637                 case IP_VERSION(3, 1, 5):
4638                 case IP_VERSION(3, 1, 6):
4639                 case IP_VERSION(3, 2, 0):
4640                 case IP_VERSION(3, 2, 1):
4641                 case IP_VERSION(3, 5, 0):
4642                         if (dcn10_register_irq_handlers(dm->adev)) {
4643                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4644                                 goto fail;
4645                         }
4646                         break;
4647                 default:
4648                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4649                                         amdgpu_ip_version(adev, DCE_HWIP, 0));
4650                         goto fail;
4651                 }
4652                 break;
4653         }
4654
4655         return 0;
4656 fail:
4657         kfree(aencoder);
4658         kfree(aconnector);
4659
4660         return -EINVAL;
4661 }
4662
4663 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4664 {
4665         drm_atomic_private_obj_fini(&dm->atomic_obj);
4666 }
4667
4668 /******************************************************************************
4669  * amdgpu_display_funcs functions
4670  *****************************************************************************/
4671
4672 /*
4673  * dm_bandwidth_update - program display watermarks
4674  *
4675  * @adev: amdgpu_device pointer
4676  *
4677  * Calculate and program the display watermarks and line buffer allocation.
4678  */
4679 static void dm_bandwidth_update(struct amdgpu_device *adev)
4680 {
4681         /* TODO: implement later */
4682 }
4683
4684 static const struct amdgpu_display_funcs dm_display_funcs = {
4685         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4686         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4687         .backlight_set_level = NULL, /* never called for DC */
4688         .backlight_get_level = NULL, /* never called for DC */
4689         .hpd_sense = NULL,/* called unconditionally */
4690         .hpd_set_polarity = NULL, /* called unconditionally */
4691         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4692         .page_flip_get_scanoutpos =
4693                 dm_crtc_get_scanoutpos,/* called unconditionally */
4694         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4695         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4696 };
4697
4698 #if defined(CONFIG_DEBUG_KERNEL_DC)
4699
4700 static ssize_t s3_debug_store(struct device *device,
4701                               struct device_attribute *attr,
4702                               const char *buf,
4703                               size_t count)
4704 {
4705         int ret;
4706         int s3_state;
4707         struct drm_device *drm_dev = dev_get_drvdata(device);
4708         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4709
4710         ret = kstrtoint(buf, 0, &s3_state);
4711
4712         if (ret == 0) {
4713                 if (s3_state) {
4714                         dm_resume(adev);
4715                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4716                 } else
4717                         dm_suspend(adev);
4718         }
4719
4720         return ret == 0 ? count : 0;
4721 }
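
/*
 * Debug-only knob: writing a non-zero value to the s3_debug device
 * attribute (created from dm_early_init()) fakes a resume and fires a
 * hotplug event, while writing 0 fakes a suspend; a shortcut for
 * exercising the S3 paths without an actual system sleep.
 */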
4722
4723 DEVICE_ATTR_WO(s3_debug);
4724
4725 #endif
4726
4727 static int dm_init_microcode(struct amdgpu_device *adev)
4728 {
4729         char *fw_name_dmub;
4730         int r;
4731
4732         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4733         case IP_VERSION(2, 1, 0):
4734                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4735                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4736                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4737                 break;
4738         case IP_VERSION(3, 0, 0):
4739                 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
4740                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4741                 else
4742                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4743                 break;
4744         case IP_VERSION(3, 0, 1):
4745                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4746                 break;
4747         case IP_VERSION(3, 0, 2):
4748                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4749                 break;
4750         case IP_VERSION(3, 0, 3):
4751                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4752                 break;
4753         case IP_VERSION(3, 1, 2):
4754         case IP_VERSION(3, 1, 3):
4755                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4756                 break;
4757         case IP_VERSION(3, 1, 4):
4758                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4759                 break;
4760         case IP_VERSION(3, 1, 5):
4761                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4762                 break;
4763         case IP_VERSION(3, 1, 6):
4764                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
4765                 break;
4766         case IP_VERSION(3, 2, 0):
4767                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4768                 break;
4769         case IP_VERSION(3, 2, 1):
4770                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4771                 break;
4772         case IP_VERSION(3, 5, 0):
4773                 fw_name_dmub = FIRMWARE_DCN_35_DMUB;
4774                 break;
4775         default:
4776                 /* ASIC doesn't support DMUB. */
4777                 return 0;
4778         }
4779         r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4780         return r;
4781 }
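
/*
 * The FIRMWARE_*_DMUB/_DMCUB names above are firmware file paths
 * (typically "amdgpu/<asic>_dmcub.bin") resolved by
 * amdgpu_ucode_request() through request_firmware(); a blob missing
 * from /lib/firmware means DMUB-dependent features stay off.
 */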
4782
4783 static int dm_early_init(void *handle)
4784 {
4785         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4786         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4787         struct atom_context *ctx = mode_info->atom_context;
4788         int index = GetIndexIntoMasterTable(DATA, Object_Header);
4789         u16 data_offset;
4790
4791         /* if there is no object header, skip DM */
4792         if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4793                 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4794                 dev_info(adev->dev, "No object header, skipping DM\n");
4795                 return -ENOENT;
4796         }
4797
4798         switch (adev->asic_type) {
4799 #if defined(CONFIG_DRM_AMD_DC_SI)
4800         case CHIP_TAHITI:
4801         case CHIP_PITCAIRN:
4802         case CHIP_VERDE:
4803                 adev->mode_info.num_crtc = 6;
4804                 adev->mode_info.num_hpd = 6;
4805                 adev->mode_info.num_dig = 6;
4806                 break;
4807         case CHIP_OLAND:
4808                 adev->mode_info.num_crtc = 2;
4809                 adev->mode_info.num_hpd = 2;
4810                 adev->mode_info.num_dig = 2;
4811                 break;
4812 #endif
4813         case CHIP_BONAIRE:
4814         case CHIP_HAWAII:
4815                 adev->mode_info.num_crtc = 6;
4816                 adev->mode_info.num_hpd = 6;
4817                 adev->mode_info.num_dig = 6;
4818                 break;
4819         case CHIP_KAVERI:
4820                 adev->mode_info.num_crtc = 4;
4821                 adev->mode_info.num_hpd = 6;
4822                 adev->mode_info.num_dig = 7;
4823                 break;
4824         case CHIP_KABINI:
4825         case CHIP_MULLINS:
4826                 adev->mode_info.num_crtc = 2;
4827                 adev->mode_info.num_hpd = 6;
4828                 adev->mode_info.num_dig = 6;
4829                 break;
4830         case CHIP_FIJI:
4831         case CHIP_TONGA:
4832                 adev->mode_info.num_crtc = 6;
4833                 adev->mode_info.num_hpd = 6;
4834                 adev->mode_info.num_dig = 7;
4835                 break;
4836         case CHIP_CARRIZO:
4837                 adev->mode_info.num_crtc = 3;
4838                 adev->mode_info.num_hpd = 6;
4839                 adev->mode_info.num_dig = 9;
4840                 break;
4841         case CHIP_STONEY:
4842                 adev->mode_info.num_crtc = 2;
4843                 adev->mode_info.num_hpd = 6;
4844                 adev->mode_info.num_dig = 9;
4845                 break;
4846         case CHIP_POLARIS11:
4847         case CHIP_POLARIS12:
4848                 adev->mode_info.num_crtc = 5;
4849                 adev->mode_info.num_hpd = 5;
4850                 adev->mode_info.num_dig = 5;
4851                 break;
4852         case CHIP_POLARIS10:
4853         case CHIP_VEGAM:
4854                 adev->mode_info.num_crtc = 6;
4855                 adev->mode_info.num_hpd = 6;
4856                 adev->mode_info.num_dig = 6;
4857                 break;
4858         case CHIP_VEGA10:
4859         case CHIP_VEGA12:
4860         case CHIP_VEGA20:
4861                 adev->mode_info.num_crtc = 6;
4862                 adev->mode_info.num_hpd = 6;
4863                 adev->mode_info.num_dig = 6;
4864                 break;
4865         default:
4866
4867                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4868                 case IP_VERSION(2, 0, 2):
4869                 case IP_VERSION(3, 0, 0):
4870                         adev->mode_info.num_crtc = 6;
4871                         adev->mode_info.num_hpd = 6;
4872                         adev->mode_info.num_dig = 6;
4873                         break;
4874                 case IP_VERSION(2, 0, 0):
4875                 case IP_VERSION(3, 0, 2):
4876                         adev->mode_info.num_crtc = 5;
4877                         adev->mode_info.num_hpd = 5;
4878                         adev->mode_info.num_dig = 5;
4879                         break;
4880                 case IP_VERSION(2, 0, 3):
4881                 case IP_VERSION(3, 0, 3):
4882                         adev->mode_info.num_crtc = 2;
4883                         adev->mode_info.num_hpd = 2;
4884                         adev->mode_info.num_dig = 2;
4885                         break;
4886                 case IP_VERSION(1, 0, 0):
4887                 case IP_VERSION(1, 0, 1):
4888                 case IP_VERSION(3, 0, 1):
4889                 case IP_VERSION(2, 1, 0):
4890                 case IP_VERSION(3, 1, 2):
4891                 case IP_VERSION(3, 1, 3):
4892                 case IP_VERSION(3, 1, 4):
4893                 case IP_VERSION(3, 1, 5):
4894                 case IP_VERSION(3, 1, 6):
4895                 case IP_VERSION(3, 2, 0):
4896                 case IP_VERSION(3, 2, 1):
4897                 case IP_VERSION(3, 5, 0):
4898                         adev->mode_info.num_crtc = 4;
4899                         adev->mode_info.num_hpd = 4;
4900                         adev->mode_info.num_dig = 4;
4901                         break;
4902                 default:
4903                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4904                                         amdgpu_ip_version(adev, DCE_HWIP, 0));
4905                         return -EINVAL;
4906                 }
4907                 break;
4908         }
4909
4910         if (adev->mode_info.funcs == NULL)
4911                 adev->mode_info.funcs = &dm_display_funcs;
4912
4913         /*
4914          * Note: Do NOT change adev->audio_endpt_rreg and
4915          * adev->audio_endpt_wreg because they are initialised in
4916          * amdgpu_device_init()
4917          */
4918 #if defined(CONFIG_DEBUG_KERNEL_DC)
4919         device_create_file(
4920                 adev_to_drm(adev)->dev,
4921                 &dev_attr_s3_debug);
4922 #endif
4923         adev->dc_enabled = true;
4924
4925         return dm_init_microcode(adev);
4926 }
4927
4928 static bool modereset_required(struct drm_crtc_state *crtc_state)
4929 {
4930         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4931 }
4932
4933 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4934 {
4935         drm_encoder_cleanup(encoder);
4936         kfree(encoder);
4937 }
4938
4939 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4940         .destroy = amdgpu_dm_encoder_destroy,
4941 };
4942
4943 static int
4944 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4945                             const enum surface_pixel_format format,
4946                             enum dc_color_space *color_space)
4947 {
4948         bool full_range;
4949
4950         *color_space = COLOR_SPACE_SRGB;
4951
4952         /* DRM color properties only affect non-RGB formats. */
4953         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4954                 return 0;
4955
4956         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4957
4958         switch (plane_state->color_encoding) {
4959         case DRM_COLOR_YCBCR_BT601:
4960                 if (full_range)
4961                         *color_space = COLOR_SPACE_YCBCR601;
4962                 else
4963                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4964                 break;
4965
4966         case DRM_COLOR_YCBCR_BT709:
4967                 if (full_range)
4968                         *color_space = COLOR_SPACE_YCBCR709;
4969                 else
4970                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4971                 break;
4972
4973         case DRM_COLOR_YCBCR_BT2020:
4974                 if (full_range)
4975                         *color_space = COLOR_SPACE_2020_YCBCR;
4976                 else
4977                         return -EINVAL;
4978                 break;
4979
4980         default:
4981                 return -EINVAL;
4982         }
4983
4984         return 0;
4985 }
4986
4987 static int
4988 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4989                             const struct drm_plane_state *plane_state,
4990                             const u64 tiling_flags,
4991                             struct dc_plane_info *plane_info,
4992                             struct dc_plane_address *address,
4993                             bool tmz_surface,
4994                             bool force_disable_dcc)
4995 {
4996         const struct drm_framebuffer *fb = plane_state->fb;
4997         const struct amdgpu_framebuffer *afb =
4998                 to_amdgpu_framebuffer(plane_state->fb);
4999         int ret;
5000
5001         memset(plane_info, 0, sizeof(*plane_info));
5002
5003         switch (fb->format->format) {
5004         case DRM_FORMAT_C8:
5005                 plane_info->format =
5006                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5007                 break;
5008         case DRM_FORMAT_RGB565:
5009                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5010                 break;
5011         case DRM_FORMAT_XRGB8888:
5012         case DRM_FORMAT_ARGB8888:
5013                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5014                 break;
5015         case DRM_FORMAT_XRGB2101010:
5016         case DRM_FORMAT_ARGB2101010:
5017                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5018                 break;
5019         case DRM_FORMAT_XBGR2101010:
5020         case DRM_FORMAT_ABGR2101010:
5021                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5022                 break;
5023         case DRM_FORMAT_XBGR8888:
5024         case DRM_FORMAT_ABGR8888:
5025                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5026                 break;
5027         case DRM_FORMAT_NV21:
5028                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5029                 break;
5030         case DRM_FORMAT_NV12:
5031                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5032                 break;
5033         case DRM_FORMAT_P010:
5034                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5035                 break;
5036         case DRM_FORMAT_XRGB16161616F:
5037         case DRM_FORMAT_ARGB16161616F:
5038                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5039                 break;
5040         case DRM_FORMAT_XBGR16161616F:
5041         case DRM_FORMAT_ABGR16161616F:
5042                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5043                 break;
5044         case DRM_FORMAT_XRGB16161616:
5045         case DRM_FORMAT_ARGB16161616:
5046                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5047                 break;
5048         case DRM_FORMAT_XBGR16161616:
5049         case DRM_FORMAT_ABGR16161616:
5050                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5051                 break;
5052         default:
5053                 DRM_ERROR(
5054                         "Unsupported screen format %p4cc\n",
5055                         &fb->format->format);
5056                 return -EINVAL;
5057         }
5058
5059         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5060         case DRM_MODE_ROTATE_0:
5061                 plane_info->rotation = ROTATION_ANGLE_0;
5062                 break;
5063         case DRM_MODE_ROTATE_90:
5064                 plane_info->rotation = ROTATION_ANGLE_90;
5065                 break;
5066         case DRM_MODE_ROTATE_180:
5067                 plane_info->rotation = ROTATION_ANGLE_180;
5068                 break;
5069         case DRM_MODE_ROTATE_270:
5070                 plane_info->rotation = ROTATION_ANGLE_270;
5071                 break;
5072         default:
5073                 plane_info->rotation = ROTATION_ANGLE_0;
5074                 break;
5075         }
5076
5077
5078         plane_info->visible = true;
5079         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5080
5081         plane_info->layer_index = plane_state->normalized_zpos;
5082
5083         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5084                                           &plane_info->color_space);
5085         if (ret)
5086                 return ret;
5087
5088         ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5089                                            plane_info->rotation, tiling_flags,
5090                                            &plane_info->tiling_info,
5091                                            &plane_info->plane_size,
5092                                            &plane_info->dcc, address,
5093                                            tmz_surface, force_disable_dcc);
5094         if (ret)
5095                 return ret;
5096
5097         amdgpu_dm_plane_fill_blending_from_plane_state(
5098                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5099                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5100
5101         return 0;
5102 }
5103
5104 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5105                                     struct dc_plane_state *dc_plane_state,
5106                                     struct drm_plane_state *plane_state,
5107                                     struct drm_crtc_state *crtc_state)
5108 {
5109         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5110         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5111         struct dc_scaling_info scaling_info;
5112         struct dc_plane_info plane_info;
5113         int ret;
5114         bool force_disable_dcc = false;
5115
5116         ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5117         if (ret)
5118                 return ret;
5119
5120         dc_plane_state->src_rect = scaling_info.src_rect;
5121         dc_plane_state->dst_rect = scaling_info.dst_rect;
5122         dc_plane_state->clip_rect = scaling_info.clip_rect;
5123         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5124
5125         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5126         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5127                                           afb->tiling_flags,
5128                                           &plane_info,
5129                                           &dc_plane_state->address,
5130                                           afb->tmz_surface,
5131                                           force_disable_dcc);
5132         if (ret)
5133                 return ret;
5134
5135         dc_plane_state->format = plane_info.format;
5136         dc_plane_state->color_space = plane_info.color_space;
5138         dc_plane_state->plane_size = plane_info.plane_size;
5139         dc_plane_state->rotation = plane_info.rotation;
5140         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5141         dc_plane_state->stereo_format = plane_info.stereo_format;
5142         dc_plane_state->tiling_info = plane_info.tiling_info;
5143         dc_plane_state->visible = plane_info.visible;
5144         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5145         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5146         dc_plane_state->global_alpha = plane_info.global_alpha;
5147         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5148         dc_plane_state->dcc = plane_info.dcc;
5149         dc_plane_state->layer_index = plane_info.layer_index;
5150         dc_plane_state->flip_int_enabled = true;
5151
5152         /*
5153          * Always set input transfer function, since plane state is refreshed
5154          * every time.
5155          */
5156         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5157         if (ret)
5158                 return ret;
5159
5160         return 0;
5161 }
5162
5163 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5164                                       struct rect *dirty_rect, int32_t x,
5165                                       s32 y, s32 width, s32 height,
5166                                       int *i, bool ffu)
5167 {
5168         WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
5169
5170         dirty_rect->x = x;
5171         dirty_rect->y = y;
5172         dirty_rect->width = width;
5173         dirty_rect->height = height;
5174
5175         if (ffu)
5176                 drm_dbg(plane->dev,
5177                         "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5178                         plane->base.id, width, height);
5179         else
5180                 drm_dbg(plane->dev,
5181                         "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5182                         plane->base.id, x, y, width, height);
5183
5184         (*i)++;
5185 }
5186
5187 /**
5188  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5189  *
5190  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5191  *         remote fb
5192  * @old_plane_state: Old state of @plane
5193  * @new_plane_state: New state of @plane
5194  * @crtc_state: New state of CRTC connected to the @plane
5195  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5196  * @dirty_regions_changed: dirty regions changed
5197  *
5198  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5199  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5200  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5201  * amdgpu_dm's.
5202  *
5203  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5204  * plane with regions that require flushing to the eDP remote buffer. In
5205  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5206  * implicitly provide damage clips without any client support via the plane
5207  * bounds.
5208  */
5209 static void fill_dc_dirty_rects(struct drm_plane *plane,
5210                                 struct drm_plane_state *old_plane_state,
5211                                 struct drm_plane_state *new_plane_state,
5212                                 struct drm_crtc_state *crtc_state,
5213                                 struct dc_flip_addrs *flip_addrs,
5214                                 bool *dirty_regions_changed)
5215 {
5216         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5217         struct rect *dirty_rects = flip_addrs->dirty_rects;
5218         u32 num_clips;
5219         struct drm_mode_rect *clips;
5220         bool bb_changed;
5221         bool fb_changed;
5222         u32 i = 0;
5223         *dirty_regions_changed = false;
5224
5225         /*
5226          * Cursor plane has its own dirty rect update interface. See
5227          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5228          */
5229         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5230                 return;
5231
5232         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5233         clips = drm_plane_get_damage_clips(new_plane_state);
5234
5235         if (!dm_crtc_state->mpo_requested) {
5236                 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5237                         goto ffu;
5238
5239                 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5240                         fill_dc_dirty_rect(new_plane_state->plane,
5241                                            &dirty_rects[flip_addrs->dirty_rect_count],
5242                                            clips->x1, clips->y1,
5243                                            clips->x2 - clips->x1, clips->y2 - clips->y1,
5244                                            &flip_addrs->dirty_rect_count,
5245                                            false);
5246                 return;
5247         }
5248
5249         /*
5250          * MPO is requested. Add entire plane bounding box to dirty rects if
5251          * flipped to or damaged.
5252          *
5253          * If plane is moved or resized, also add old bounding box to dirty
5254          * rects.
5255          */
5256         fb_changed = old_plane_state->fb->base.id !=
5257                      new_plane_state->fb->base.id;
5258         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5259                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5260                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5261                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5262
5263         drm_dbg(plane->dev,
5264                 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5265                 new_plane_state->plane->base.id,
5266                 bb_changed, fb_changed, num_clips);
5267
5268         *dirty_regions_changed = bb_changed;
5269
5270         if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
5271                 goto ffu;
5272
5273         if (bb_changed) {
5274                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5275                                    new_plane_state->crtc_x,
5276                                    new_plane_state->crtc_y,
5277                                    new_plane_state->crtc_w,
5278                                    new_plane_state->crtc_h, &i, false);
5279
5280                 /* Add old plane bounding-box if plane is moved or resized */
5281                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5282                                    old_plane_state->crtc_x,
5283                                    old_plane_state->crtc_y,
5284                                    old_plane_state->crtc_w,
5285                                    old_plane_state->crtc_h, &i, false);
5286         }
5287
5288         if (num_clips) {
5289                 for (; i < num_clips; clips++)
5290                         fill_dc_dirty_rect(new_plane_state->plane,
5291                                            &dirty_rects[i], clips->x1,
5292                                            clips->y1, clips->x2 - clips->x1,
5293                                            clips->y2 - clips->y1, &i, false);
5294         } else if (fb_changed && !bb_changed) {
5295                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5296                                    new_plane_state->crtc_x,
5297                                    new_plane_state->crtc_y,
5298                                    new_plane_state->crtc_w,
5299                                    new_plane_state->crtc_h, &i, false);
5300         }
5301
5302         flip_addrs->dirty_rect_count = i;
5303         return;
5304
5305 ffu:
5306         fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5307                            dm_crtc_state->base.mode.crtc_hdisplay,
5308                            dm_crtc_state->base.mode.crtc_vdisplay,
5309                            &flip_addrs->dirty_rect_count, true);
5310 }
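
/*
 * For reference, a damage-aware client supplies the clips consumed
 * above through the plane's FB_DAMAGE_CLIPS property. A minimal
 * userspace sketch (error handling omitted; fb_damage_clips_prop_id is
 * assumed to have been looked up by name beforehand):
 *
 *   struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *   uint32_t blob_id;
 *
 *   drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
 *   drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id,
 *                            blob_id);
 */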
5311
5312 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5313                                            const struct dm_connector_state *dm_state,
5314                                            struct dc_stream_state *stream)
5315 {
5316         enum amdgpu_rmx_type rmx_type;
5317
5318         struct rect src = { 0 }; /* viewport in composition space */
5319         struct rect dst = { 0 }; /* stream addressable area */
5320
5321         /* No mode, nothing to be done */
5322         if (!mode)
5323                 return;
5324
5325         /* Full screen scaling by default */
5326         src.width = mode->hdisplay;
5327         src.height = mode->vdisplay;
5328         dst.width = stream->timing.h_addressable;
5329         dst.height = stream->timing.v_addressable;
5330
5331         if (dm_state) {
5332                 rmx_type = dm_state->scaling;
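                /*
                 * Compare aspect ratios by cross-multiplication to stay
                 * in integer math: src.w/src.h < dst.w/dst.h iff
                 * src.w * dst.h < src.h * dst.w. The oversized
                 * destination side is shrunk, and the result is
                 * centered via dst.x/dst.y below.
                 */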
5333                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5334                         if (src.width * dst.height <
5335                                         src.height * dst.width) {
5336                                 /* height needs less upscaling/more downscaling */
5337                                 dst.width = src.width *
5338                                                 dst.height / src.height;
5339                         } else {
5340                                 /* width needs less upscaling/more downscaling */
5341                                 dst.height = src.height *
5342                                                 dst.width / src.width;
5343                         }
5344                 } else if (rmx_type == RMX_CENTER) {
5345                         dst = src;
5346                 }
5347
5348                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5349                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5350
5351                 if (dm_state->underscan_enable) {
5352                         dst.x += dm_state->underscan_hborder / 2;
5353                         dst.y += dm_state->underscan_vborder / 2;
5354                         dst.width -= dm_state->underscan_hborder;
5355                         dst.height -= dm_state->underscan_vborder;
5356                 }
5357         }
5358
5359         stream->src = src;
5360         stream->dst = dst;
5361
5362         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5363                       dst.x, dst.y, dst.width, dst.height);
5365 }
5366
5367 static enum dc_color_depth
5368 convert_color_depth_from_display_info(const struct drm_connector *connector,
5369                                       bool is_y420, int requested_bpc)
5370 {
5371         u8 bpc;
5372
5373         if (is_y420) {
5374                 bpc = 8;
5375
5376                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5377                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5378                         bpc = 16;
5379                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5380                         bpc = 12;
5381                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5382                         bpc = 10;
5383         } else {
5384                 bpc = (uint8_t)connector->display_info.bpc;
5385                 /* Assume 8 bpc by default if no bpc is specified. */
5386                 bpc = bpc ? bpc : 8;
5387         }
5388
5389         if (requested_bpc > 0) {
5390                 /*
5391                  * Cap display bpc based on the user requested value.
5392                  *
5393                  * The value for state->max_bpc may not be correctly updated
5394                  * depending on when the connector gets added to the state
5395                  * or if this was called outside of atomic check, so it
5396                  * can't be used directly.
5397                  */
5398                 bpc = min_t(u8, bpc, requested_bpc);
5399
5400                 /* Round down to the nearest even number. */
5401                 bpc = bpc - (bpc & 1);
5402         }
5403
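        /*
         * Illustrative example: a 12 bpc panel with a user-requested max bpc
         * of 11 is first capped to 11 and then rounded down to 10, which maps
         * to COLOR_DEPTH_101010 below.
         */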
5404         switch (bpc) {
5405         case 0:
5406                 /*
5407                  * Temporary workaround: DRM doesn't parse color depth for
5408                  * EDID revisions before 1.4.
5409                  * TODO: Fix edid parsing
5410                  */
5411                 return COLOR_DEPTH_888;
5412         case 6:
5413                 return COLOR_DEPTH_666;
5414         case 8:
5415                 return COLOR_DEPTH_888;
5416         case 10:
5417                 return COLOR_DEPTH_101010;
5418         case 12:
5419                 return COLOR_DEPTH_121212;
5420         case 14:
5421                 return COLOR_DEPTH_141414;
5422         case 16:
5423                 return COLOR_DEPTH_161616;
5424         default:
5425                 return COLOR_DEPTH_UNDEFINED;
5426         }
5427 }
5428
5429 static enum dc_aspect_ratio
5430 get_aspect_ratio(const struct drm_display_mode *mode_in)
5431 {
5432         /* 1-1 mapping, since both enums follow the HDMI spec. */
5433         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5434 }
5435
5436 static enum dc_color_space
5437 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
5438                        const struct drm_connector_state *connector_state)
5439 {
5440         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5441
5442         switch (connector_state->colorspace) {
5443         case DRM_MODE_COLORIMETRY_BT601_YCC:
5444                 if (dc_crtc_timing->flags.Y_ONLY)
5445                         color_space = COLOR_SPACE_YCBCR601_LIMITED;
5446                 else
5447                         color_space = COLOR_SPACE_YCBCR601;
5448                 break;
5449         case DRM_MODE_COLORIMETRY_BT709_YCC:
5450                 if (dc_crtc_timing->flags.Y_ONLY)
5451                         color_space = COLOR_SPACE_YCBCR709_LIMITED;
5452                 else
5453                         color_space = COLOR_SPACE_YCBCR709;
5454                 break;
5455         case DRM_MODE_COLORIMETRY_OPRGB:
5456                 color_space = COLOR_SPACE_ADOBERGB;
5457                 break;
5458         case DRM_MODE_COLORIMETRY_BT2020_RGB:
5459         case DRM_MODE_COLORIMETRY_BT2020_YCC:
5460                 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
5461                         color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
5462                 else
5463                         color_space = COLOR_SPACE_2020_YCBCR;
5464                 break;
5465         case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
5466         default:
5467                 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
5468                         color_space = COLOR_SPACE_SRGB;
5469                 /*
5470                  * 27030 kHz is the separation point between HDTV and SDTV
5471                  * according to the HDMI spec; we use YCbCr709 above it and
5472                  * YCbCr601 below it.
5473                  */
5474                 } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
5475                         if (dc_crtc_timing->flags.Y_ONLY)
5476                                 color_space =
5477                                         COLOR_SPACE_YCBCR709_LIMITED;
5478                         else
5479                                 color_space = COLOR_SPACE_YCBCR709;
5480                 } else {
5481                         if (dc_crtc_timing->flags.Y_ONLY)
5482                                 color_space =
5483                                         COLOR_SPACE_YCBCR601_LIMITED;
5484                         else
5485                                 color_space = COLOR_SPACE_YCBCR601;
5486                 }
5487                 break;
5488         }
5489
5490         return color_space;
5491 }
5492
5493 static enum display_content_type
5494 get_output_content_type(const struct drm_connector_state *connector_state)
5495 {
5496         switch (connector_state->content_type) {
5497         default:
5498         case DRM_MODE_CONTENT_TYPE_NO_DATA:
5499                 return DISPLAY_CONTENT_TYPE_NO_DATA;
5500         case DRM_MODE_CONTENT_TYPE_GRAPHICS:
5501                 return DISPLAY_CONTENT_TYPE_GRAPHICS;
5502         case DRM_MODE_CONTENT_TYPE_PHOTO:
5503                 return DISPLAY_CONTENT_TYPE_PHOTO;
5504         case DRM_MODE_CONTENT_TYPE_CINEMA:
5505                 return DISPLAY_CONTENT_TYPE_CINEMA;
5506         case DRM_MODE_CONTENT_TYPE_GAME:
5507                 return DISPLAY_CONTENT_TYPE_GAME;
5508         }
5509 }
5510
5511 static bool adjust_colour_depth_from_display_info(
5512         struct dc_crtc_timing *timing_out,
5513         const struct drm_display_info *info)
5514 {
5515         enum dc_color_depth depth = timing_out->display_color_depth;
5516         int normalized_clk;
5517
5518         do {
5519                 normalized_clk = timing_out->pix_clk_100hz / 10;
5520                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5521                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5522                         normalized_clk /= 2;
5523                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5524                 switch (depth) {
5525                 case COLOR_DEPTH_888:
5526                         break;
5527                 case COLOR_DEPTH_101010:
5528                         normalized_clk = (normalized_clk * 30) / 24;
5529                         break;
5530                 case COLOR_DEPTH_121212:
5531                         normalized_clk = (normalized_clk * 36) / 24;
5532                         break;
5533                 case COLOR_DEPTH_161616:
5534                         normalized_clk = (normalized_clk * 48) / 24;
5535                         break;
5536                 default:
5537                         /* The above depths are the only ones valid for HDMI. */
5538                         return false;
5539                 }
5540                 if (normalized_clk <= info->max_tmds_clock) {
5541                         timing_out->display_color_depth = depth;
5542                         return true;
5543                 }
5544         } while (--depth > COLOR_DEPTH_666);
5545         return false;
5546 }
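
/*
 * Illustrative worked example for the loop above (values chosen for
 * illustration only): a 4k60 YCbCr 4:2:0 stream has pix_clk_100hz = 5940000,
 * so normalized_clk starts at 594000 / 2 = 297000 kHz. On a sink with
 * max_tmds_clock = 340000 kHz, 12 bpc would need 297000 * 36 / 24 = 445500
 * kHz and 10 bpc would need 297000 * 30 / 24 = 371250 kHz, both too fast, so
 * the loop settles on COLOR_DEPTH_888 at 297000 kHz.
 */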
5547
5548 static void fill_stream_properties_from_drm_display_mode(
5549         struct dc_stream_state *stream,
5550         const struct drm_display_mode *mode_in,
5551         const struct drm_connector *connector,
5552         const struct drm_connector_state *connector_state,
5553         const struct dc_stream_state *old_stream,
5554         int requested_bpc)
5555 {
5556         struct dc_crtc_timing *timing_out = &stream->timing;
5557         const struct drm_display_info *info = &connector->display_info;
5558         struct amdgpu_dm_connector *aconnector = NULL;
5559         struct hdmi_vendor_infoframe hv_frame;
5560         struct hdmi_avi_infoframe avi_frame;
5561
5562         if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
5563                 aconnector = to_amdgpu_dm_connector(connector);
5564
5565         memset(&hv_frame, 0, sizeof(hv_frame));
5566         memset(&avi_frame, 0, sizeof(avi_frame));
5567
5568         timing_out->h_border_left = 0;
5569         timing_out->h_border_right = 0;
5570         timing_out->v_border_top = 0;
5571         timing_out->v_border_bottom = 0;
5572         /* TODO: un-hardcode */
5573         if (drm_mode_is_420_only(info, mode_in)
5574                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5575                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5576         else if (drm_mode_is_420_also(info, mode_in)
5577                         && aconnector
5578                         && aconnector->force_yuv420_output)
5579                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5580         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5581                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5582                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5583         else
5584                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5585
5586         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5587         timing_out->display_color_depth = convert_color_depth_from_display_info(
5588                 connector,
5589                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5590                 requested_bpc);
5591         timing_out->scan_type = SCANNING_TYPE_NODATA;
5592         timing_out->hdmi_vic = 0;
5593
5594         if (old_stream) {
5595                 timing_out->vic = old_stream->timing.vic;
5596                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5597                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5598         } else {
5599                 timing_out->vic = drm_match_cea_mode(mode_in);
5600                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5601                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5602                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5603                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5604         }
5605
5606         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5607                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5608                 timing_out->vic = avi_frame.video_code;
5609                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5610                 timing_out->hdmi_vic = hv_frame.vic;
5611         }
5612
5613         if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
5614                 timing_out->h_addressable = mode_in->hdisplay;
5615                 timing_out->h_total = mode_in->htotal;
5616                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5617                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5618                 timing_out->v_total = mode_in->vtotal;
5619                 timing_out->v_addressable = mode_in->vdisplay;
5620                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5621                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5622                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5623         } else {
5624                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5625                 timing_out->h_total = mode_in->crtc_htotal;
5626                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5627                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5628                 timing_out->v_total = mode_in->crtc_vtotal;
5629                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5630                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5631                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5632                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5633         }
5634
5635         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5636
5637         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5638         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5639         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5640                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5641                     drm_mode_is_420_also(info, mode_in) &&
5642                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5643                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5644                         adjust_colour_depth_from_display_info(timing_out, info);
5645                 }
5646         }
5647
5648         stream->output_color_space = get_output_color_space(timing_out, connector_state);
5649         stream->content_type = get_output_content_type(connector_state);
5650 }
5651
5652 static void fill_audio_info(struct audio_info *audio_info,
5653                             const struct drm_connector *drm_connector,
5654                             const struct dc_sink *dc_sink)
5655 {
5656         int i = 0;
5657         int cea_revision = 0;
5658         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5659
5660         audio_info->manufacture_id = edid_caps->manufacturer_id;
5661         audio_info->product_id = edid_caps->product_id;
5662
5663         cea_revision = drm_connector->display_info.cea_rev;
5664
5665         strscpy(audio_info->display_name,
5666                 edid_caps->display_name,
5667                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5668
5669         if (cea_revision >= 3) {
5670                 audio_info->mode_count = edid_caps->audio_mode_count;
5671
5672                 for (i = 0; i < audio_info->mode_count; ++i) {
5673                         audio_info->modes[i].format_code =
5674                                         (enum audio_format_code)
5675                                         (edid_caps->audio_modes[i].format_code);
5676                         audio_info->modes[i].channel_count =
5677                                         edid_caps->audio_modes[i].channel_count;
5678                         audio_info->modes[i].sample_rates.all =
5679                                         edid_caps->audio_modes[i].sample_rate;
5680                         audio_info->modes[i].sample_size =
5681                                         edid_caps->audio_modes[i].sample_size;
5682                 }
5683         }
5684
5685         audio_info->flags.all = edid_caps->speaker_flags;
5686
5687         /* TODO: We only check the progressive mode; check the interlaced mode too */
5688         if (drm_connector->latency_present[0]) {
5689                 audio_info->video_latency = drm_connector->video_latency[0];
5690                 audio_info->audio_latency = drm_connector->audio_latency[0];
5691         }
5692
5693         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5694
5695 }
5696
5697 static void
5698 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5699                                       struct drm_display_mode *dst_mode)
5700 {
5701         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5702         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5703         dst_mode->crtc_clock = src_mode->crtc_clock;
5704         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5705         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5706         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5707         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5708         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5709         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5710         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5711         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5712         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5713         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5714         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5715 }
5716
5717 static void
5718 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5719                                         const struct drm_display_mode *native_mode,
5720                                         bool scale_enabled)
5721 {
5722         if (scale_enabled) {
5723                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5724         } else if (native_mode->clock == drm_mode->clock &&
5725                         native_mode->htotal == drm_mode->htotal &&
5726                         native_mode->vtotal == drm_mode->vtotal) {
5727                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5728         } else {
5729                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
5730         }
5731 }
5732
5733 static struct dc_sink *
5734 create_fake_sink(struct dc_link *link)
5735 {
5736         struct dc_sink_init_data sink_init_data = { 0 };
5737         struct dc_sink *sink = NULL;
5738
5739         sink_init_data.link = link;
5740         sink_init_data.sink_signal = link->connector_signal;
5741
5742         sink = dc_sink_create(&sink_init_data);
5743         if (!sink) {
5744                 DRM_ERROR("Failed to create sink!\n");
5745                 return NULL;
5746         }
5747         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5748
5749         return sink;
5750 }
5751
5752 static void set_multisync_trigger_params(
5753                 struct dc_stream_state *stream)
5754 {
5755         struct dc_stream_state *master = NULL;
5756
5757         if (stream->triggered_crtc_reset.enabled) {
5758                 master = stream->triggered_crtc_reset.event_source;
5759                 stream->triggered_crtc_reset.event =
5760                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5761                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5762                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5763         }
5764 }
5765
5766 static void set_master_stream(struct dc_stream_state *stream_set[],
5767                               int stream_count)
5768 {
5769         int j, highest_rfr = 0, master_stream = 0;
5770
5771         for (j = 0; j < stream_count; j++) {
5772                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5773                         int refresh_rate = 0;
5774
5775                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5776                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
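                        /*
                         * e.g. a 1080p CEA mode: pix_clk_100hz = 1485000
                         * (148.5 MHz), h_total = 2200 and v_total = 1125,
                         * giving 148500000 / 2475000 = 60 Hz.
                         */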
5777                         if (refresh_rate > highest_rfr) {
5778                                 highest_rfr = refresh_rate;
5779                                 master_stream = j;
5780                         }
5781                 }
5782         }
5783         for (j = 0; j < stream_count; j++) {
5784                 if (stream_set[j])
5785                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5786         }
5787 }
5788
5789 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5790 {
5791         int i = 0;
5792         struct dc_stream_state *stream;
5793
5794         if (context->stream_count < 2)
5795                 return;
5796         for (i = 0; i < context->stream_count; i++) {
5797                 if (!context->streams[i])
5798                         continue;
5799                 /*
5800                  * TODO: add a function to read the AMD VSDB bits and set the
5801                  * crtc_sync_master.multi_sync_enabled flag.
5802                  * For now it is left set to false.
5803                  */
5804         }
5805
5806         set_master_stream(context->streams, context->stream_count);
5807
5808         for (i = 0; i < context->stream_count; i++) {
5809                 stream = context->streams[i];
5810
5811                 if (!stream)
5812                         continue;
5813
5814                 set_multisync_trigger_params(stream);
5815         }
5816 }
5817
5818 /**
5819  * DOC: FreeSync Video
5820  *
5821  * When a userspace application wants to play a video, the content follows a
5822  * standard format definition that usually specifies the FPS for that format.
5823  * The list below illustrates some common video formats and their
5824  * expected FPS:
5825  *
5826  * - TV/NTSC (23.976 FPS)
5827  * - Cinema (24 FPS)
5828  * - TV/PAL (25 FPS)
5829  * - TV/NTSC (29.97 FPS)
5830  * - TV/NTSC (30 FPS)
5831  * - Cinema HFR (48 FPS)
5832  * - TV/PAL (50 FPS)
5833  * - Commonly used (60 FPS)
5834  * - Multiples of 24 (48,72,96 FPS)
5835  *
5836  * The list of standard video formats is not huge, so these modes can be
5837  * added to the connector's modeset list beforehand. With that, userspace
5838  * can leverage FreeSync to extend the front porch in order to attain the
5839  * target refresh rate. Such a switch happens seamlessly, without screen
5840  * blanking or reprogramming of the output in any other way. If userspace
5841  * requests a modesetting change compatible with FreeSync modes that differ
5842  * only in the refresh rate, DC will skip the full update and avoid a blink
5843  * during the transition. For example, a video player can change the mode
5844  * from 60Hz to 30Hz for playing TV/NTSC content when it goes full screen,
5845  * without causing any display blink. The same concept applies to any other
5846  * mode setting change.
5847  */
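
/*
 * Illustrative example (numbers chosen for illustration only): a 1080p60
 * base mode with vtotal = 1125 can present 30 FPS content by stretching the
 * vertical front porch until the effective vtotal is 1125 * 60 / 30 = 2250
 * lines, while the pixel clock and every other timing parameter stay
 * untouched.
 */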
5848 static struct drm_display_mode *
5849 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5850                 bool use_probed_modes)
5851 {
5852         struct drm_display_mode *m, *m_pref = NULL;
5853         u16 current_refresh, highest_refresh;
5854         struct list_head *list_head = use_probed_modes ?
5855                 &aconnector->base.probed_modes :
5856                 &aconnector->base.modes;
5857
5858         if (aconnector->freesync_vid_base.clock != 0)
5859                 return &aconnector->freesync_vid_base;
5860
5861         /* Find the preferred mode */
5862         list_for_each_entry(m, list_head, head) {
5863                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5864                         m_pref = m;
5865                         break;
5866                 }
5867         }
5868
5869         if (!m_pref) {
5870                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5871                 m_pref = list_first_entry_or_null(
5872                                 &aconnector->base.modes, struct drm_display_mode, head);
5873                 if (!m_pref) {
5874                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5875                         return NULL;
5876                 }
5877         }
5878
5879         highest_refresh = drm_mode_vrefresh(m_pref);
5880
5881         /*
5882          * Find the mode with the highest refresh rate at the same resolution.
5883          * For some monitors, the preferred mode is not the one with the
5884          * highest supported refresh rate.
5885          */
5886         list_for_each_entry(m, list_head, head) {
5887                 current_refresh = drm_mode_vrefresh(m);
5888
5889                 if (m->hdisplay == m_pref->hdisplay &&
5890                     m->vdisplay == m_pref->vdisplay &&
5891                     highest_refresh < current_refresh) {
5892                         highest_refresh = current_refresh;
5893                         m_pref = m;
5894                 }
5895         }
5896
5897         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5898         return m_pref;
5899 }
5900
5901 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5902                 struct amdgpu_dm_connector *aconnector)
5903 {
5904         struct drm_display_mode *high_mode;
5905         int timing_diff;
5906
5907         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5908         if (!high_mode || !mode)
5909                 return false;
5910
5911         timing_diff = high_mode->vtotal - mode->vtotal;
5912
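        /*
         * The two modes must be identical except for the vertical front
         * porch: the whole vtotal delta has to land between vdisplay and
         * vsync_start, which keeps the sync width and back porch equal in
         * both modes.
         */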
5913         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5914             high_mode->hdisplay != mode->hdisplay ||
5915             high_mode->vdisplay != mode->vdisplay ||
5916             high_mode->hsync_start != mode->hsync_start ||
5917             high_mode->hsync_end != mode->hsync_end ||
5918             high_mode->htotal != mode->htotal ||
5919             high_mode->hskew != mode->hskew ||
5920             high_mode->vscan != mode->vscan ||
5921             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5922             high_mode->vsync_end - mode->vsync_end != timing_diff)
5923                 return false;
5924         else
5925                 return true;
5926 }
5927
5928 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5929                             struct dc_sink *sink, struct dc_stream_state *stream,
5930                             struct dsc_dec_dpcd_caps *dsc_caps)
5931 {
5932         stream->timing.flags.DSC = 0;
5933         dsc_caps->is_dsc_supported = false;
5934
5935         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5936             sink->sink_signal == SIGNAL_TYPE_EDP)) {
5937                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5938                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5939                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5940                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5941                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5942                                 dsc_caps);
5943         }
5944 }
5945
5946
5947 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5948                                     struct dc_sink *sink, struct dc_stream_state *stream,
5949                                     struct dsc_dec_dpcd_caps *dsc_caps,
5950                                     uint32_t max_dsc_target_bpp_limit_override)
5951 {
5952         const struct dc_link_settings *verified_link_cap = NULL;
5953         u32 link_bw_in_kbps;
5954         u32 edp_min_bpp_x16, edp_max_bpp_x16;
5955         struct dc *dc = sink->ctx->dc;
5956         struct dc_dsc_bw_range bw_range = {0};
5957         struct dc_dsc_config dsc_cfg = {0};
5958         struct dc_dsc_config_options dsc_options = {0};
5959
5960         dc_dsc_get_default_config_option(dc, &dsc_options);
5961         dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5962
5963         verified_link_cap = dc_link_get_link_cap(stream->link);
5964         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5965         edp_min_bpp_x16 = 8 * 16;
5966         edp_max_bpp_x16 = 8 * 16;
5967
5968         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5969                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5970
5971         if (edp_max_bpp_x16 < edp_min_bpp_x16)
5972                 edp_min_bpp_x16 = edp_max_bpp_x16;
5973
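        /*
         * Two-step policy: if the stream compressed at the (capped) 8 bpp
         * target already fits within the link bandwidth, program that bpp
         * directly; otherwise fall back to a DSC config constrained by the
         * link bandwidth itself.
         */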
5974         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5975                                 dc->debug.dsc_min_slice_height_override,
5976                                 edp_min_bpp_x16, edp_max_bpp_x16,
5977                                 dsc_caps,
5978                                 &stream->timing,
5979                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
5980                                 &bw_range)) {
5981
5982                 if (bw_range.max_kbps < link_bw_in_kbps) {
5983                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5984                                         dsc_caps,
5985                                         &dsc_options,
5986                                         0,
5987                                         &stream->timing,
5988                                         dc_link_get_highest_encoding_format(aconnector->dc_link),
5989                                         &dsc_cfg)) {
5990                                 stream->timing.dsc_cfg = dsc_cfg;
5991                                 stream->timing.flags.DSC = 1;
5992                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5993                         }
5994                         return;
5995                 }
5996         }
5997
5998         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5999                                 dsc_caps,
6000                                 &dsc_options,
6001                                 link_bw_in_kbps,
6002                                 &stream->timing,
6003                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6004                                 &dsc_cfg)) {
6005                 stream->timing.dsc_cfg = dsc_cfg;
6006                 stream->timing.flags.DSC = 1;
6007         }
6008 }
6009
6010
6011 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6012                                         struct dc_sink *sink, struct dc_stream_state *stream,
6013                                         struct dsc_dec_dpcd_caps *dsc_caps)
6014 {
6015         struct drm_connector *drm_connector = &aconnector->base;
6016         u32 link_bandwidth_kbps;
6017         struct dc *dc = sink->ctx->dc;
6018         u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
6019         u32 dsc_max_supported_bw_in_kbps;
6020         u32 max_dsc_target_bpp_limit_override =
6021                 drm_connector->display_info.max_dsc_bpp;
6022         struct dc_dsc_config_options dsc_options = {0};
6023
6024         dc_dsc_get_default_config_option(dc, &dsc_options);
6025         dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
6026
6027         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6028                                                         dc_link_get_link_cap(aconnector->dc_link));
6029
6030         /* Set DSC policy according to dsc_clock_en */
6031         dc_dsc_policy_set_enable_dsc_when_not_needed(
6032                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6033
6034         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
6035             !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
6036             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6037
6038                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6039
6040         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6041                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6042                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6043                                                 dsc_caps,
6044                                                 &dsc_options,
6045                                                 link_bandwidth_kbps,
6046                                                 &stream->timing,
6047                                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6048                                                 &stream->timing.dsc_cfg)) {
6049                                 stream->timing.flags.DSC = 1;
6050                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6051                         }
6052                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6053                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
6054                                         dc_link_get_highest_encoding_format(aconnector->dc_link));
6055                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6056                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6057
6058                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6059                                         max_supported_bw_in_kbps > 0 &&
6060                                         dsc_max_supported_bw_in_kbps > 0)
6061                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6062                                                 dsc_caps,
6063                                                 &dsc_options,
6064                                                 dsc_max_supported_bw_in_kbps,
6065                                                 &stream->timing,
6066                                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6067                                                 &stream->timing.dsc_cfg)) {
6068                                         stream->timing.flags.DSC = 1;
6069                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6070                                                                          __func__, drm_connector->name);
6071                                 }
6072                 }
6073         }
6074
6075         /* Overwrite the stream flag if DSC is enabled through debugfs */
6076         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6077                 stream->timing.flags.DSC = 1;
6078
6079         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6080                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6081
6082         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6083                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6084
6085         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6086                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6087 }
6088
6089 static struct dc_stream_state *
6090 create_stream_for_sink(struct drm_connector *connector,
6091                        const struct drm_display_mode *drm_mode,
6092                        const struct dm_connector_state *dm_state,
6093                        const struct dc_stream_state *old_stream,
6094                        int requested_bpc)
6095 {
6096         struct amdgpu_dm_connector *aconnector = NULL;
6097         struct drm_display_mode *preferred_mode = NULL;
6098         const struct drm_connector_state *con_state = &dm_state->base;
6099         struct dc_stream_state *stream = NULL;
6100         struct drm_display_mode mode;
6101         struct drm_display_mode saved_mode;
6102         struct drm_display_mode *freesync_mode = NULL;
6103         bool native_mode_found = false;
6104         bool recalculate_timing = false;
6105         bool scale = dm_state->scaling != RMX_OFF;
6106         int mode_refresh;
6107         int preferred_refresh = 0;
6108         enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
6109         struct dsc_dec_dpcd_caps dsc_caps;
6110
6111         struct dc_link *link = NULL;
6112         struct dc_sink *sink = NULL;
6113
6114         drm_mode_init(&mode, drm_mode);
6115         memset(&saved_mode, 0, sizeof(saved_mode));
6116
6117         if (connector == NULL) {
6118                 DRM_ERROR("connector is NULL!\n");
6119                 return stream;
6120         }
6121
6122         if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
6124                 aconnector = to_amdgpu_dm_connector(connector);
6125                 link = aconnector->dc_link;
6126         } else {
6127                 struct drm_writeback_connector *wbcon = NULL;
6128                 struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
6129
6130                 wbcon = drm_connector_to_writeback(connector);
6131                 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
6132                 link = dm_wbcon->link;
6133         }
6134
6135         if (!aconnector || !aconnector->dc_sink) {
6136                 sink = create_fake_sink(link);
6137                 if (!sink)
6138                         return stream;
6139
6140         } else {
6141                 sink = aconnector->dc_sink;
6142                 dc_sink_retain(sink);
6143         }
6144
6145         stream = dc_create_stream_for_sink(sink);
6146
6147         if (stream == NULL) {
6148                 DRM_ERROR("Failed to create stream for sink!\n");
6149                 goto finish;
6150         }
6151
6152         /* We leave this NULL for writeback connectors */
6153         stream->dm_stream_context = aconnector;
6154
6155         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6156                 connector->display_info.hdmi.scdc.scrambling.low_rates;
6157
6158         list_for_each_entry(preferred_mode, &connector->modes, head) {
6159                 /* Search for preferred mode */
6160                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6161                         native_mode_found = true;
6162                         break;
6163                 }
6164         }
6165         if (!native_mode_found)
6166                 preferred_mode = list_first_entry_or_null(
6167                                 &connector->modes,
6168                                 struct drm_display_mode,
6169                                 head);
6170
6171         mode_refresh = drm_mode_vrefresh(&mode);
6172
6173         if (preferred_mode == NULL) {
6174                 /*
6175                  * This may not be an error: the use case is when we have no
6176                  * usermode calls to reset and set the mode upon hotplug. In that
6177                  * case, we call set mode ourselves to restore the previous mode,
6178                  * and the mode list may not be filled in time.
6179                  */
6180                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6181         } else if (aconnector) {
6182                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6183                 if (recalculate_timing) {
6184                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6185                         drm_mode_copy(&saved_mode, &mode);
6186                         drm_mode_copy(&mode, freesync_mode);
6187                 } else {
6188                         decide_crtc_timing_for_drm_display_mode(
6189                                         &mode, preferred_mode, scale);
6190
6191                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6192                 }
6193         }
6194
6195         if (recalculate_timing)
6196                 drm_mode_set_crtcinfo(&saved_mode, 0);
6197
6198         /*
6199          * If scaling is enabled and the refresh rate didn't change,
6200          * copy the VIC and polarities from the old timings.
6201          */
6202         if (!scale || mode_refresh != preferred_refresh)
6203                 fill_stream_properties_from_drm_display_mode(
6204                         stream, &mode, connector, con_state, NULL,
6205                         requested_bpc);
6206         else
6207                 fill_stream_properties_from_drm_display_mode(
6208                         stream, &mode, connector, con_state, old_stream,
6209                         requested_bpc);
6210
6211         /* The rest isn't needed for writeback connectors */
6212         if (!aconnector)
6213                 goto finish;
6214
6215         if (aconnector->timing_changed) {
6216                 drm_dbg(aconnector->base.dev,
6217                         "overriding timing for automated test, bpc %d, changing to %d\n",
6218                         stream->timing.display_color_depth,
6219                         aconnector->timing_requested->display_color_depth);
6220                 stream->timing = *aconnector->timing_requested;
6221         }
6222
6223         /* SST DSC determination policy */
6224         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6225         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6226                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6227
6228         update_stream_scaling_settings(&mode, dm_state, stream);
6229
6230         fill_audio_info(
6231                 &stream->audio_info,
6232                 connector,
6233                 sink);
6234
6235         update_stream_signal(stream, sink);
6236
6237         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6238                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6239
6240         if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
6241                 /*
6242                  * Decide whether the stream supports VSC SDP colorimetry
6243                  * before building the VSC info packet.
6244                  */
6245                 stream->use_vsc_sdp_for_colorimetry = false;
6246                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6247                         stream->use_vsc_sdp_for_colorimetry =
6248                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6249                 } else {
6250                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6251                                 stream->use_vsc_sdp_for_colorimetry = true;
6252                 }
6253                 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6254                         tf = TRANSFER_FUNC_GAMMA_22;
6255                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
6256                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6257
6258         }
6259 finish:
6260         dc_sink_release(sink);
6261
6262         return stream;
6263 }
6264
6265 static enum drm_connector_status
6266 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6267 {
6268         bool connected;
6269         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6270
6271         /*
6272          * Notes:
6273          * 1. This interface is NOT called in context of HPD irq.
6274          * 2. This interface *is called* in the context of a user-mode ioctl,
6275          * which makes it a bad place for *any* MST-related activity.
6276          */
6277
6278         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6279             !aconnector->fake_enable)
6280                 connected = (aconnector->dc_sink != NULL);
6281         else
6282                 connected = (aconnector->base.force == DRM_FORCE_ON ||
6283                                 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
6284
6285         update_subconnector_property(aconnector);
6286
6287         return (connected ? connector_status_connected :
6288                         connector_status_disconnected);
6289 }
6290
6291 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6292                                             struct drm_connector_state *connector_state,
6293                                             struct drm_property *property,
6294                                             uint64_t val)
6295 {
6296         struct drm_device *dev = connector->dev;
6297         struct amdgpu_device *adev = drm_to_adev(dev);
6298         struct dm_connector_state *dm_old_state =
6299                 to_dm_connector_state(connector->state);
6300         struct dm_connector_state *dm_new_state =
6301                 to_dm_connector_state(connector_state);
6302
6303         int ret = -EINVAL;
6304
6305         if (property == dev->mode_config.scaling_mode_property) {
6306                 enum amdgpu_rmx_type rmx_type;
6307
6308                 switch (val) {
6309                 case DRM_MODE_SCALE_CENTER:
6310                         rmx_type = RMX_CENTER;
6311                         break;
6312                 case DRM_MODE_SCALE_ASPECT:
6313                         rmx_type = RMX_ASPECT;
6314                         break;
6315                 case DRM_MODE_SCALE_FULLSCREEN:
6316                         rmx_type = RMX_FULL;
6317                         break;
6318                 case DRM_MODE_SCALE_NONE:
6319                 default:
6320                         rmx_type = RMX_OFF;
6321                         break;
6322                 }
6323
6324                 if (dm_old_state->scaling == rmx_type)
6325                         return 0;
6326
6327                 dm_new_state->scaling = rmx_type;
6328                 ret = 0;
6329         } else if (property == adev->mode_info.underscan_hborder_property) {
6330                 dm_new_state->underscan_hborder = val;
6331                 ret = 0;
6332         } else if (property == adev->mode_info.underscan_vborder_property) {
6333                 dm_new_state->underscan_vborder = val;
6334                 ret = 0;
6335         } else if (property == adev->mode_info.underscan_property) {
6336                 dm_new_state->underscan_enable = val;
6337                 ret = 0;
6338         } else if (property == adev->mode_info.abm_level_property) {
6339                 dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
6340                 ret = 0;
6341         }
6342
6343         return ret;
6344 }
6345
6346 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6347                                             const struct drm_connector_state *state,
6348                                             struct drm_property *property,
6349                                             uint64_t *val)
6350 {
6351         struct drm_device *dev = connector->dev;
6352         struct amdgpu_device *adev = drm_to_adev(dev);
6353         struct dm_connector_state *dm_state =
6354                 to_dm_connector_state(state);
6355         int ret = -EINVAL;
6356
6357         if (property == dev->mode_config.scaling_mode_property) {
6358                 switch (dm_state->scaling) {
6359                 case RMX_CENTER:
6360                         *val = DRM_MODE_SCALE_CENTER;
6361                         break;
6362                 case RMX_ASPECT:
6363                         *val = DRM_MODE_SCALE_ASPECT;
6364                         break;
6365                 case RMX_FULL:
6366                         *val = DRM_MODE_SCALE_FULLSCREEN;
6367                         break;
6368                 case RMX_OFF:
6369                 default:
6370                         *val = DRM_MODE_SCALE_NONE;
6371                         break;
6372                 }
6373                 ret = 0;
6374         } else if (property == adev->mode_info.underscan_hborder_property) {
6375                 *val = dm_state->underscan_hborder;
6376                 ret = 0;
6377         } else if (property == adev->mode_info.underscan_vborder_property) {
6378                 *val = dm_state->underscan_vborder;
6379                 ret = 0;
6380         } else if (property == adev->mode_info.underscan_property) {
6381                 *val = dm_state->underscan_enable;
6382                 ret = 0;
6383         } else if (property == adev->mode_info.abm_level_property) {
6384                 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
6385                         dm_state->abm_level : 0;
6386                 ret = 0;
6387         }
6388
6389         return ret;
6390 }
6391
6392 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6393 {
6394         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6395
6396         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6397 }
6398
6399 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6400 {
6401         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6402         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6403         struct amdgpu_display_manager *dm = &adev->dm;
6404
6405         /*
6406          * Only call this if mst_mgr was initialized earlier, since that is
6407          * not done for all connector types.
6408          */
6409         if (aconnector->mst_mgr.dev)
6410                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6411
6412         if (aconnector->bl_idx != -1) {
6413                 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
6414                 dm->backlight_dev[aconnector->bl_idx] = NULL;
6415         }
6416
6417         if (aconnector->dc_em_sink)
6418                 dc_sink_release(aconnector->dc_em_sink);
6419         aconnector->dc_em_sink = NULL;
6420         if (aconnector->dc_sink)
6421                 dc_sink_release(aconnector->dc_sink);
6422         aconnector->dc_sink = NULL;
6423
6424         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6425         drm_connector_unregister(connector);
6426         drm_connector_cleanup(connector);
6427         if (aconnector->i2c) {
6428                 i2c_del_adapter(&aconnector->i2c->base);
6429                 kfree(aconnector->i2c);
6430         }
6431         kfree(aconnector->dm_dp_aux.aux.name);
6432
6433         kfree(connector);
6434 }
6435
6436 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6437 {
6438         struct dm_connector_state *state =
6439                 to_dm_connector_state(connector->state);
6440
6441         if (connector->state)
6442                 __drm_atomic_helper_connector_destroy_state(connector->state);
6443
6444         kfree(state);
6445
6446         state = kzalloc(sizeof(*state), GFP_KERNEL);
6447
6448         if (state) {
6449                 state->scaling = RMX_OFF;
6450                 state->underscan_enable = false;
6451                 state->underscan_hborder = 0;
6452                 state->underscan_vborder = 0;
6453                 state->base.max_requested_bpc = 8;
6454                 state->vcpi_slots = 0;
6455                 state->pbn = 0;
6456
6457                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6458                         state->abm_level = amdgpu_dm_abm_level ?:
6459                                 ABM_LEVEL_IMMEDIATE_DISABLE;
6460
6461                 __drm_atomic_helper_connector_reset(connector, &state->base);
6462         }
6463 }
6464
6465 struct drm_connector_state *
6466 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6467 {
6468         struct dm_connector_state *state =
6469                 to_dm_connector_state(connector->state);
6470
6471         struct dm_connector_state *new_state =
6472                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6473
6474         if (!new_state)
6475                 return NULL;
6476
6477         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6478
6479         new_state->freesync_capable = state->freesync_capable;
6480         new_state->abm_level = state->abm_level;
6481         new_state->scaling = state->scaling;
6482         new_state->underscan_enable = state->underscan_enable;
6483         new_state->underscan_hborder = state->underscan_hborder;
6484         new_state->underscan_vborder = state->underscan_vborder;
6485         new_state->vcpi_slots = state->vcpi_slots;
6486         new_state->pbn = state->pbn;
6487         return &new_state->base;
6488 }
6489
6490 static int
6491 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6492 {
6493         struct amdgpu_dm_connector *amdgpu_dm_connector =
6494                 to_amdgpu_dm_connector(connector);
6495         int r;
6496
6497         amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
6498
6499         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6500             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6501                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6502                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6503                 if (r)
6504                         return r;
6505         }
6506
6507 #if defined(CONFIG_DEBUG_FS)
6508         connector_debugfs_init(amdgpu_dm_connector);
6509 #endif
6510
6511         return 0;
6512 }
6513
6514 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
6515 {
6516         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6517         struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
6518         struct dc_link *dc_link = aconnector->dc_link;
6519         struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
6520         struct edid *edid;
6521
6522         /*
6523          * Note: drm_get_edid gets the EDID in the following order:
6524          * 1) override EDID if set via edid_override debugfs,
6525          * 2) firmware EDID if set via the edid_firmware module parameter,
6526          * 3) regular DDC read.
6527          */
6528         edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
6529         if (!edid) {
6530                 DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
6531                 return;
6532         }
6533
6534         aconnector->edid = edid;
6535
6536         /* Update emulated (virtual) sink's EDID */
6537         if (dc_em_sink && dc_link) {
6538                 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
6539                 memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
6540                 dm_helpers_parse_edid_caps(
6541                         dc_link,
6542                         &dc_em_sink->dc_edid,
6543                         &dc_em_sink->edid_caps);
6544         }
6545 }
6546
6547 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6548         .reset = amdgpu_dm_connector_funcs_reset,
6549         .detect = amdgpu_dm_connector_detect,
6550         .fill_modes = drm_helper_probe_single_connector_modes,
6551         .destroy = amdgpu_dm_connector_destroy,
6552         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6553         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6554         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6555         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6556         .late_register = amdgpu_dm_connector_late_register,
6557         .early_unregister = amdgpu_dm_connector_unregister,
6558         .force = amdgpu_dm_connector_funcs_force
6559 };
6560
6561 static int get_modes(struct drm_connector *connector)
6562 {
6563         return amdgpu_dm_connector_get_modes(connector);
6564 }
6565
6566 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6567 {
6568         struct drm_connector *connector = &aconnector->base;
6569         struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
6570         struct dc_sink_init_data init_params = {
6571                         .link = aconnector->dc_link,
6572                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6573         };
6574         struct edid *edid;
6575
6576         /*
6577          * Note: drm_get_edid gets the EDID in the following order:
6578          * 1) override EDID if set via edid_override debugfs,
6579          * 2) firmware EDID if set via the edid_firmware module parameter,
6580          * 3) regular DDC read.
6581          */
6582         edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
6583         if (!edid) {
6584                 DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
6585                 return;
6586         }
6587
6588         if (drm_detect_hdmi_monitor(edid))
6589                 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
6590
6591         aconnector->edid = edid;
6592
6593         aconnector->dc_em_sink = dc_link_add_remote_sink(
6594                 aconnector->dc_link,
6595                 (uint8_t *)edid,
6596                 (edid->extensions + 1) * EDID_LENGTH,
6597                 &init_params);
6598
6599         if (aconnector->base.force == DRM_FORCE_ON) {
6600                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6601                 aconnector->dc_link->local_sink :
6602                 aconnector->dc_em_sink;
6603                 dc_sink_retain(aconnector->dc_sink);
6604         }
6605 }
6606
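/*
 * Seed link capabilities for a forced connector. An untrained DP link
 * reports zeroed verified caps, so assume 4 lanes at HBR2 (LINK_RATE_HIGH2)
 * to allow an initial modeset, then create the emulated sink.
 */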
6607 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6608 {
6609         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6610
6611         /*
6612          * In case of a headless boot with a forced-on DP managed connector,
6613          * these settings have to be != 0 to get an initial modeset.
6614          */
6615         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6616                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6617                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6618         }
6619
6620         create_eml_sink(aconnector);
6621 }
6622
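/*
 * Validate a stream against a throwaway DC state: build a minimal linear
 * ARGB8888 plane matching the stream's source dimensions, attach both to a
 * fresh dc_state, and run stream, plane, and global validation. The state
 * and plane are released before returning, so this has no side effects on
 * the current configuration.
 */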
6623 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6624                                                 struct dc_stream_state *stream)
6625 {
6626         enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6627         struct dc_plane_state *dc_plane_state = NULL;
6628         struct dc_state *dc_state = NULL;
6629
6630         if (!stream)
6631                 goto cleanup;
6632
6633         dc_plane_state = dc_create_plane_state(dc);
6634         if (!dc_plane_state)
6635                 goto cleanup;
6636
6637         dc_state = dc_create_state(dc);
6638         if (!dc_state)
6639                 goto cleanup;
6640
6641         /* populate stream to plane */
6642         dc_plane_state->src_rect.height  = stream->src.height;
6643         dc_plane_state->src_rect.width   = stream->src.width;
6644         dc_plane_state->dst_rect.height  = stream->src.height;
6645         dc_plane_state->dst_rect.width   = stream->src.width;
6646         dc_plane_state->clip_rect.height = stream->src.height;
6647         dc_plane_state->clip_rect.width  = stream->src.width;
6648         dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6649         dc_plane_state->plane_size.surface_size.height = stream->src.height;
6650         dc_plane_state->plane_size.surface_size.width  = stream->src.width;
6651         dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
6652         dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
6653         dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6654         dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6655         dc_plane_state->rotation = ROTATION_ANGLE_0;
6656         dc_plane_state->is_tiling_rotated = false;
6657         dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6658
6659         dc_result = dc_validate_stream(dc, stream);
6660         if (dc_result == DC_OK)
6661                 dc_result = dc_validate_plane(dc, dc_plane_state);
6662
6663         if (dc_result == DC_OK)
6664                 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6665
6666         if (dc_result == DC_OK && !dc_add_plane_to_context(
6667                                                 dc,
6668                                                 stream,
6669                                                 dc_plane_state,
6670                                                 dc_state))
6671                 dc_result = DC_FAIL_ATTACH_SURFACES;
6672
6673         if (dc_result == DC_OK)
6674                 dc_result = dc_validate_global_state(dc, dc_state, true);
6675
6676 cleanup:
6677         if (dc_state)
6678                 dc_release_state(dc_state);
6679
6680         if (dc_plane_state)
6681                 dc_plane_state_release(dc_plane_state);
6682
6683         return dc_result;
6684 }
6685
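/*
 * Create a stream for the sink and validate it, retrying with progressively
 * lower color depth (down to 6 bpc, in steps of 2) when DC rejects the mode.
 * Writeback connectors return the stream without DC validation. If encoder
 * validation still fails, one more attempt is made with YCbCr420 output
 * forced, which roughly halves the required link bandwidth.
 */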
6686 struct dc_stream_state *
6687 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6688                                 const struct drm_display_mode *drm_mode,
6689                                 const struct dm_connector_state *dm_state,
6690                                 const struct dc_stream_state *old_stream)
6691 {
6692         struct drm_connector *connector = &aconnector->base;
6693         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6694         struct dc_stream_state *stream;
6695         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6696         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6697         enum dc_status dc_result = DC_OK;
6698
6699         do {
6700                 stream = create_stream_for_sink(connector, drm_mode,
6701                                                 dm_state, old_stream,
6702                                                 requested_bpc);
6703                 if (stream == NULL) {
6704                         DRM_ERROR("Failed to create stream for sink!\n");
6705                         break;
6706                 }
6707
6708                 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
6709                         return stream;
6710
6711                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6712                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6713                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6714
6715                 if (dc_result == DC_OK)
6716                         dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6717
6718                 if (dc_result != DC_OK) {
6719                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6720                                       drm_mode->hdisplay,
6721                                       drm_mode->vdisplay,
6722                                       drm_mode->clock,
6723                                       dc_result,
6724                                       dc_status_to_str(dc_result));
6725
6726                         dc_stream_release(stream);
6727                         stream = NULL;
6728                         requested_bpc -= 2; /* lower bpc to retry validation */
6729                 }
6730
6731         } while (stream == NULL && requested_bpc >= 6);
6732
6733         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6734                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6735
6736                 aconnector->force_yuv420_output = true;
6737                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6738                                                 dm_state, old_stream);
6739                 aconnector->force_yuv420_output = false;
6740         }
6741
6742         return stream;
6743 }
6744
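/*
 * drm_connector_helper_funcs .mode_valid callback: a mode is accepted only
 * if a DC stream can be created and validated for it. Interlaced and
 * doublescan modes are rejected outright.
 */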
6745 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6746                                    struct drm_display_mode *mode)
6747 {
6748         int result = MODE_ERROR;
6749         struct dc_sink *dc_sink;
6750         /* TODO: Unhardcode stream count */
6751         struct dc_stream_state *stream;
6752         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6753
6754         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6755                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6756                 return result;
6757
6758         /*
6759          * Only run this the first time mode_valid is called, to
6760          * initialize EDID management.
6761          */
6762         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6763                 !aconnector->dc_em_sink)
6764                 handle_edid_mgmt(aconnector);
6765
6766         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6767
6768         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6769                                 aconnector->base.force != DRM_FORCE_ON) {
6770                 DRM_ERROR("dc_sink is NULL!\n");
6771                 goto fail;
6772         }
6773
6774         drm_mode_set_crtcinfo(mode, 0);
6775
6776         stream = create_validate_stream_for_sink(aconnector, mode,
6777                                                  to_dm_connector_state(connector->state),
6778                                                  NULL);
6779         if (stream) {
6780                 dc_stream_release(stream);
6781                 result = MODE_OK;
6782         }
6783
6784 fail:
6785         /* TODO: error handling */
6786         return result;
6787 }
6788
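/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet. The HDMI Dynamic Range and Mastering (DRM) infoframe is a fixed
 * 30 bytes: a 4-byte header followed by a 26-byte payload. For HDMI the
 * checksum byte leads the payload; for DP the same payload is wrapped in
 * an SDP header instead.
 */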
6789 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6790                                 struct dc_info_packet *out)
6791 {
6792         struct hdmi_drm_infoframe frame;
6793         unsigned char buf[30]; /* 26 + 4 */
6794         ssize_t len;
6795         int ret, i;
6796
6797         memset(out, 0, sizeof(*out));
6798
6799         if (!state->hdr_output_metadata)
6800                 return 0;
6801
6802         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6803         if (ret)
6804                 return ret;
6805
6806         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6807         if (len < 0)
6808                 return (int)len;
6809
6810         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6811         if (len != 30)
6812                 return -EINVAL;
6813
6814         /* Prepare the infopacket for DC. */
6815         switch (state->connector->connector_type) {
6816         case DRM_MODE_CONNECTOR_HDMIA:
6817                 out->hb0 = 0x87; /* type */
6818                 out->hb1 = 0x01; /* version */
6819                 out->hb2 = 0x1A; /* length */
6820                 out->sb[0] = buf[3]; /* checksum */
6821                 i = 1;
6822                 break;
6823
6824         case DRM_MODE_CONNECTOR_DisplayPort:
6825         case DRM_MODE_CONNECTOR_eDP:
6826                 out->hb0 = 0x00; /* sdp id, zero */
6827                 out->hb1 = 0x87; /* type */
6828                 out->hb2 = 0x1D; /* payload len - 1 */
6829                 out->hb3 = (0x13 << 2); /* sdp version */
6830                 out->sb[0] = 0x01; /* version */
6831                 out->sb[1] = 0x1A; /* length */
6832                 i = 2;
6833                 break;
6834
6835         default:
6836                 return -EINVAL;
6837         }
6838
6839         memcpy(&out->sb[i], &buf[4], 26);
6840         out->valid = true;
6841
6842         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6843                        sizeof(out->sb), false);
6844
6845         return 0;
6846 }
6847
6848 static int
6849 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6850                                  struct drm_atomic_state *state)
6851 {
6852         struct drm_connector_state *new_con_state =
6853                 drm_atomic_get_new_connector_state(state, conn);
6854         struct drm_connector_state *old_con_state =
6855                 drm_atomic_get_old_connector_state(state, conn);
6856         struct drm_crtc *crtc = new_con_state->crtc;
6857         struct drm_crtc_state *new_crtc_state;
6858         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6859         int ret;
6860
6861         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6862
6863         if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6864                 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6865                 if (ret < 0)
6866                         return ret;
6867         }
6868
6869         if (!crtc)
6870                 return 0;
6871
6872         if (new_con_state->colorspace != old_con_state->colorspace) {
6873                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6874                 if (IS_ERR(new_crtc_state))
6875                         return PTR_ERR(new_crtc_state);
6876
6877                 new_crtc_state->mode_changed = true;
6878         }
6879
6880         if (new_con_state->content_type != old_con_state->content_type) {
6881                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6882                 if (IS_ERR(new_crtc_state))
6883                         return PTR_ERR(new_crtc_state);
6884
6885                 new_crtc_state->mode_changed = true;
6886         }
6887
6888         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6889                 struct dc_info_packet hdr_infopacket;
6890
6891                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6892                 if (ret)
6893                         return ret;
6894
6895                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6896                 if (IS_ERR(new_crtc_state))
6897                         return PTR_ERR(new_crtc_state);
6898
6899                 /*
6900                  * DC considers the stream backends changed if the
6901                  * static metadata changes. Forcing the modeset also
6902                  * gives a simple way for userspace to switch from
6903                  * 8bpc to 10bpc when setting the metadata to enter
6904                  * or exit HDR.
6905                  *
6906                  * Changing the static metadata after it's been
6907                  * set is permissible, however. So only force a
6908                  * modeset if we're entering or exiting HDR.
6909                  */
6910                 new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
6911                         !old_con_state->hdr_output_metadata ||
6912                         !new_con_state->hdr_output_metadata;
6913         }
6914
6915         return 0;
6916 }
6917
6918 static const struct drm_connector_helper_funcs
6919 amdgpu_dm_connector_helper_funcs = {
6920         /*
6921          * When hotplugging a second, bigger display in FB console mode, bigger
6922          * resolution modes get filtered out by drm_mode_validate_size() and are
6923          * missing after the user starts lightdm. So renew the mode list in the
6924          * get_modes callback instead of just returning the mode count.
6925          */
6926         .get_modes = get_modes,
6927         .mode_valid = amdgpu_dm_connector_mode_valid,
6928         .atomic_check = amdgpu_dm_connector_atomic_check,
6929 };
6930
6931 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6932 {
6933
6934 }
6935
6936 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6937 {
6938         switch (display_color_depth) {
6939         case COLOR_DEPTH_666:
6940                 return 6;
6941         case COLOR_DEPTH_888:
6942                 return 8;
6943         case COLOR_DEPTH_101010:
6944                 return 10;
6945         case COLOR_DEPTH_121212:
6946                 return 12;
6947         case COLOR_DEPTH_141414:
6948                 return 14;
6949         case COLOR_DEPTH_161616:
6950                 return 16;
6951         default:
6952                 break;
6953         }
6954         return 0;
6955 }
6956
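/*
 * drm_encoder_helper_funcs .atomic_check for MST streams: compute the
 * bandwidth the mode needs in PBN (payload bandwidth number) units from
 * the pixel clock and effective bpp, then reserve the corresponding VCPI
 * time slots in the MST topology state. Non-MST connectors return early.
 */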
6957 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6958                                           struct drm_crtc_state *crtc_state,
6959                                           struct drm_connector_state *conn_state)
6960 {
6961         struct drm_atomic_state *state = crtc_state->state;
6962         struct drm_connector *connector = conn_state->connector;
6963         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6964         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6965         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6966         struct drm_dp_mst_topology_mgr *mst_mgr;
6967         struct drm_dp_mst_port *mst_port;
6968         struct drm_dp_mst_topology_state *mst_state;
6969         enum dc_color_depth color_depth;
6970         int clock, bpp = 0;
6971         bool is_y420 = false;
6972
6973         if (!aconnector->mst_output_port)
6974                 return 0;
6975
6976         mst_port = aconnector->mst_output_port;
6977         mst_mgr = &aconnector->mst_root->mst_mgr;
6978
6979         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6980                 return 0;
6981
6982         mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6983         if (IS_ERR(mst_state))
6984                 return PTR_ERR(mst_state);
6985
6986         if (!mst_state->pbn_div.full)
6987                 mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
6988
6989         if (!state->duplicated) {
6990                 int max_bpc = conn_state->max_requested_bpc;
6991
6992                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6993                           aconnector->force_yuv420_output;
6994                 color_depth = convert_color_depth_from_display_info(connector,
6995                                                                     is_y420,
6996                                                                     max_bpc);
6997                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6998                 clock = adjusted_mode->clock;
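                /* drm_dp_calc_pbn_mode() takes bpp in 1/16-bpp units, hence the << 4 */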
6999                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
7000         }
7001
7002         dm_new_connector_state->vcpi_slots =
7003                 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
7004                                               dm_new_connector_state->pbn);
7005         if (dm_new_connector_state->vcpi_slots < 0) {
7006                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7007                 return dm_new_connector_state->vcpi_slots;
7008         }
7009         return 0;
7010 }
7011
7012 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7013         .disable = dm_encoder_helper_disable,
7014         .atomic_check = dm_encoder_helper_atomic_check
7015 };
7016
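/*
 * Second pass over the MST connectors once DSC has been decided: take the
 * PBN values computed by compute_mst_dsc_configs_for_state(), convert them
 * to slot counts, enable or disable DSC on each port accordingly, and store
 * pbn/vcpi_slots back into the connector state.
 */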
7017 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7018                                             struct dc_state *dc_state,
7019                                             struct dsc_mst_fairness_vars *vars)
7020 {
7021         struct dc_stream_state *stream = NULL;
7022         struct drm_connector *connector;
7023         struct drm_connector_state *new_con_state;
7024         struct amdgpu_dm_connector *aconnector;
7025         struct dm_connector_state *dm_conn_state;
7026         int i, j, ret;
7027         int vcpi, pbn_div, pbn, slot_num = 0;
7028
7029         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7030
7031                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
7032                         continue;
7033
7034                 aconnector = to_amdgpu_dm_connector(connector);
7035
7036                 if (!aconnector->mst_output_port)
7037                         continue;
7038
7039                 if (!new_con_state || !new_con_state->crtc)
7040                         continue;
7041
7042                 dm_conn_state = to_dm_connector_state(new_con_state);
7043
7044                 for (j = 0; j < dc_state->stream_count; j++) {
7045                         stream = dc_state->streams[j];
7046                         if (!stream)
7047                                 continue;
7048
7049                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7050                                 break;
7051
7052                         stream = NULL;
7053                 }
7054
7055                 if (!stream)
7056                         continue;
7057
7058                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7059                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7060                 for (j = 0; j < dc_state->stream_count; j++) {
7061                         if (vars[j].aconnector == aconnector) {
7062                                 pbn = vars[j].pbn;
7063                                 break;
7064                         }
7065                 }
7066
7067                 if (j == dc_state->stream_count)
7068                         continue;
7069
7070                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7071
7072                 if (stream->timing.flags.DSC != 1) {
7073                         dm_conn_state->pbn = pbn;
7074                         dm_conn_state->vcpi_slots = slot_num;
7075
7076                         ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
7077                                                            dm_conn_state->pbn, false);
7078                         if (ret < 0)
7079                                 return ret;
7080
7081                         continue;
7082                 }
7083
7084                 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
7085                 if (vcpi < 0)
7086                         return vcpi;
7087
7088                 dm_conn_state->pbn = pbn;
7089                 dm_conn_state->vcpi_slots = vcpi;
7090         }
7091         return 0;
7092 }
7093
7094 static int to_drm_connector_type(enum signal_type st)
7095 {
7096         switch (st) {
7097         case SIGNAL_TYPE_HDMI_TYPE_A:
7098                 return DRM_MODE_CONNECTOR_HDMIA;
7099         case SIGNAL_TYPE_EDP:
7100                 return DRM_MODE_CONNECTOR_eDP;
7101         case SIGNAL_TYPE_LVDS:
7102                 return DRM_MODE_CONNECTOR_LVDS;
7103         case SIGNAL_TYPE_RGB:
7104                 return DRM_MODE_CONNECTOR_VGA;
7105         case SIGNAL_TYPE_DISPLAY_PORT:
7106         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7107                 return DRM_MODE_CONNECTOR_DisplayPort;
7108         case SIGNAL_TYPE_DVI_DUAL_LINK:
7109         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7110                 return DRM_MODE_CONNECTOR_DVID;
7111         case SIGNAL_TYPE_VIRTUAL:
7112                 return DRM_MODE_CONNECTOR_VIRTUAL;
7113
7114         default:
7115                 return DRM_MODE_CONNECTOR_Unknown;
7116         }
7117 }
7118
7119 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7120 {
7121         struct drm_encoder *encoder;
7122
7123         /* There is only one encoder per connector */
7124         drm_connector_for_each_possible_encoder(connector, encoder)
7125                 return encoder;
7126
7127         return NULL;
7128 }
7129
7130 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7131 {
7132         struct drm_encoder *encoder;
7133         struct amdgpu_encoder *amdgpu_encoder;
7134
7135         encoder = amdgpu_dm_connector_to_encoder(connector);
7136
7137         if (encoder == NULL)
7138                 return;
7139
7140         amdgpu_encoder = to_amdgpu_encoder(encoder);
7141
7142         amdgpu_encoder->native_mode.clock = 0;
7143
7144         if (!list_empty(&connector->probed_modes)) {
7145                 struct drm_display_mode *preferred_mode = NULL;
7146
7147                 list_for_each_entry(preferred_mode,
7148                                     &connector->probed_modes,
7149                                     head) {
7150                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7151                                 amdgpu_encoder->native_mode = *preferred_mode;
7152
7153                         break;
7154                 }
7155
7156         }
7157 }
7158
7159 static struct drm_display_mode *
7160 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7161                              char *name,
7162                              int hdisplay, int vdisplay)
7163 {
7164         struct drm_device *dev = encoder->dev;
7165         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7166         struct drm_display_mode *mode = NULL;
7167         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7168
7169         mode = drm_mode_duplicate(dev, native_mode);
7170
7171         if (mode == NULL)
7172                 return NULL;
7173
7174         mode->hdisplay = hdisplay;
7175         mode->vdisplay = vdisplay;
7176         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7177         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7178
7179         return mode;
7180
7181 }
7182
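/*
 * Add a set of common modes (640x480 up to 1920x1200) that fit within the
 * native mode, so scaled resolutions are available even when the EDID only
 * advertises a few timings. Modes already in the probed list are skipped.
 */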
7183 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7184                                                  struct drm_connector *connector)
7185 {
7186         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7187         struct drm_display_mode *mode = NULL;
7188         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7189         struct amdgpu_dm_connector *amdgpu_dm_connector =
7190                                 to_amdgpu_dm_connector(connector);
7191         int i;
7192         int n;
7193         struct mode_size {
7194                 char name[DRM_DISPLAY_MODE_LEN];
7195                 int w;
7196                 int h;
7197         } common_modes[] = {
7198                 {  "640x480",  640,  480},
7199                 {  "800x600",  800,  600},
7200                 { "1024x768", 1024,  768},
7201                 { "1280x720", 1280,  720},
7202                 { "1280x800", 1280,  800},
7203                 {"1280x1024", 1280, 1024},
7204                 { "1440x900", 1440,  900},
7205                 {"1680x1050", 1680, 1050},
7206                 {"1600x1200", 1600, 1200},
7207                 {"1920x1080", 1920, 1080},
7208                 {"1920x1200", 1920, 1200}
7209         };
7210
7211         n = ARRAY_SIZE(common_modes);
7212
7213         for (i = 0; i < n; i++) {
7214                 struct drm_display_mode *curmode = NULL;
7215                 bool mode_existed = false;
7216
7217                 if (common_modes[i].w > native_mode->hdisplay ||
7218                     common_modes[i].h > native_mode->vdisplay ||
7219                    (common_modes[i].w == native_mode->hdisplay &&
7220                     common_modes[i].h == native_mode->vdisplay))
7221                         continue;
7222
7223                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7224                         if (common_modes[i].w == curmode->hdisplay &&
7225                             common_modes[i].h == curmode->vdisplay) {
7226                                 mode_existed = true;
7227                                 break;
7228                         }
7229                 }
7230
7231                 if (mode_existed)
7232                         continue;
7233
7234                 mode = amdgpu_dm_create_common_mode(encoder,
7235                                 common_modes[i].name, common_modes[i].w,
7236                                 common_modes[i].h);
7237                 if (!mode)
7238                         continue;
7239
7240                 drm_mode_probed_add(connector, mode);
7241                 amdgpu_dm_connector->num_modes++;
7242         }
7243 }
7244
7245 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7246 {
7247         struct drm_encoder *encoder;
7248         struct amdgpu_encoder *amdgpu_encoder;
7249         const struct drm_display_mode *native_mode;
7250
7251         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7252             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7253                 return;
7254
7255         mutex_lock(&connector->dev->mode_config.mutex);
7256         amdgpu_dm_connector_get_modes(connector);
7257         mutex_unlock(&connector->dev->mode_config.mutex);
7258
7259         encoder = amdgpu_dm_connector_to_encoder(connector);
7260         if (!encoder)
7261                 return;
7262
7263         amdgpu_encoder = to_amdgpu_encoder(encoder);
7264
7265         native_mode = &amdgpu_encoder->native_mode;
7266         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7267                 return;
7268
7269         drm_connector_set_panel_orientation_with_quirk(connector,
7270                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7271                                                        native_mode->hdisplay,
7272                                                        native_mode->vdisplay);
7273 }
7274
7275 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7276                                               struct edid *edid)
7277 {
7278         struct amdgpu_dm_connector *amdgpu_dm_connector =
7279                         to_amdgpu_dm_connector(connector);
7280
7281         if (edid) {
7282                 /* empty probed_modes */
7283                 INIT_LIST_HEAD(&connector->probed_modes);
7284                 amdgpu_dm_connector->num_modes =
7285                                 drm_add_edid_modes(connector, edid);
7286
7287                 /* Sort the probed modes before calling
7288                  * amdgpu_dm_get_native_mode(), since an EDID can have
7289                  * more than one preferred mode. Modes later in the
7290                  * probed mode list could be of a higher, preferred
7291                  * resolution: for example, a 3840x2160 preferred
7292                  * timing in the base EDID and a 4096x2160 preferred
7293                  * resolution in a later DID extension block.
7294                  */
7295                 drm_mode_sort(&connector->probed_modes);
7296                 amdgpu_dm_get_native_mode(connector);
7297
7298                 /* Freesync capabilities are reset by calling
7299                  * drm_add_edid_modes() and need to be
7300                  * restored here.
7301                  */
7302                 amdgpu_dm_update_freesync_caps(connector, edid);
7303         } else {
7304                 amdgpu_dm_connector->num_modes = 0;
7305         }
7306 }
7307
7308 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7309                               struct drm_display_mode *mode)
7310 {
7311         struct drm_display_mode *m;
7312
7313         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7314                 if (drm_mode_equal(m, mode))
7315                         return true;
7316         }
7317
7318         return false;
7319 }
7320
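/*
 * Synthesize fixed-refresh variants of the highest-refresh probed mode at
 * the common video rates listed below by stretching the vertical front
 * porch, so fixed-rate content can play at its native cadence within the
 * VRR range: target_vtotal = clock / (rate * htotal). As an illustration
 * (assumed numbers, not from any particular display): retargeting a
 * 148500 kHz, htotal 2200 mode to 24 Hz gives 148500 * 10^6 / (24000 *
 * 2200) = 2812 lines, i.e. 148500000 / (2200 * 2812) ~= 24.005 Hz.
 */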
7321 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7322 {
7323         const struct drm_display_mode *m;
7324         struct drm_display_mode *new_mode;
7325         uint i;
7326         u32 new_modes_count = 0;
7327
7328         /* Standard FPS values
7329          *
7330          * 23.976       - TV/NTSC
7331          * 24           - Cinema
7332          * 25           - TV/PAL
7333          * 29.97        - TV/NTSC
7334          * 30           - TV/NTSC
7335          * 48           - Cinema HFR
7336          * 50           - TV/PAL
7337          * 60           - Commonly used
7338          * 48,72,96,120 - Multiples of 24
7339          */
7340         static const u32 common_rates[] = {
7341                 23976, 24000, 25000, 29970, 30000,
7342                 48000, 50000, 60000, 72000, 96000, 120000
7343         };
7344
7345         /*
7346          * Find mode with highest refresh rate with the same resolution
7347          * as the preferred mode. Some monitors report a preferred mode
7348          * with lower resolution than the highest refresh rate supported.
7349          */
7350
7351         m = get_highest_refresh_rate_mode(aconnector, true);
7352         if (!m)
7353                 return 0;
7354
7355         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7356                 u64 target_vtotal, target_vtotal_diff;
7357                 u64 num, den;
7358
7359                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7360                         continue;
7361
7362                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7363                     common_rates[i] > aconnector->max_vfreq * 1000)
7364                         continue;
7365
7366                 num = (unsigned long long)m->clock * 1000 * 1000;
7367                 den = common_rates[i] * (unsigned long long)m->htotal;
7368                 target_vtotal = div_u64(num, den);
7369                 target_vtotal_diff = target_vtotal - m->vtotal;
7370
7371                 /* Check for illegal modes */
7372                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7373                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7374                     m->vtotal + target_vtotal_diff < m->vsync_end)
7375                         continue;
7376
7377                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7378                 if (!new_mode)
7379                         goto out;
7380
7381                 new_mode->vtotal += (u16)target_vtotal_diff;
7382                 new_mode->vsync_start += (u16)target_vtotal_diff;
7383                 new_mode->vsync_end += (u16)target_vtotal_diff;
7384                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7385                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7386
7387                 if (!is_duplicate_mode(aconnector, new_mode)) {
7388                         drm_mode_probed_add(&aconnector->base, new_mode);
7389                         new_modes_count += 1;
7390                 } else
7391                         drm_mode_destroy(aconnector->base.dev, new_mode);
7392         }
7393  out:
7394         return new_modes_count;
7395 }
7396
7397 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7398                                                    struct edid *edid)
7399 {
7400         struct amdgpu_dm_connector *amdgpu_dm_connector =
7401                 to_amdgpu_dm_connector(connector);
7402
7403         if (!edid)
7404                 return;
7405
7406         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7407                 amdgpu_dm_connector->num_modes +=
7408                         add_fs_modes(amdgpu_dm_connector);
7409 }
7410
7411 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7412 {
7413         struct amdgpu_dm_connector *amdgpu_dm_connector =
7414                         to_amdgpu_dm_connector(connector);
7415         struct drm_encoder *encoder;
7416         struct edid *edid = amdgpu_dm_connector->edid;
7417         struct dc_link_settings *verified_link_cap =
7418                         &amdgpu_dm_connector->dc_link->verified_link_cap;
7419         const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
7420
7421         encoder = amdgpu_dm_connector_to_encoder(connector);
7422
7423         if (!drm_edid_is_valid(edid)) {
7424                 amdgpu_dm_connector->num_modes =
7425                                 drm_add_modes_noedid(connector, 640, 480);
7426                 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
7427                         amdgpu_dm_connector->num_modes +=
7428                                 drm_add_modes_noedid(connector, 1920, 1080);
7429         } else {
7430                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7431                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7432                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7433         }
7434         amdgpu_dm_fbc_init(connector);
7435
7436         return amdgpu_dm_connector->num_modes;
7437 }
7438
7439 static const u32 supported_colorspaces =
7440         BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
7441         BIT(DRM_MODE_COLORIMETRY_OPRGB) |
7442         BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
7443         BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
7444
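/*
 * Common connector initialization shared by the connector types created
 * here: reset state, set defaults, configure HPD polling per connector
 * type, and attach the scaling, underscan, max bpc, ABM, content type,
 * colorspace, HDR metadata, VRR, and content protection properties where
 * applicable.
 */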
7445 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7446                                      struct amdgpu_dm_connector *aconnector,
7447                                      int connector_type,
7448                                      struct dc_link *link,
7449                                      int link_index)
7450 {
7451         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7452
7453         /*
7454          * Some of the properties below require access to state, like bpc.
7455          * Allocate some default initial connector state with our reset helper.
7456          */
7457         if (aconnector->base.funcs->reset)
7458                 aconnector->base.funcs->reset(&aconnector->base);
7459
7460         aconnector->connector_id = link_index;
7461         aconnector->bl_idx = -1;
7462         aconnector->dc_link = link;
7463         aconnector->base.interlace_allowed = false;
7464         aconnector->base.doublescan_allowed = false;
7465         aconnector->base.stereo_allowed = false;
7466         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7467         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7468         aconnector->audio_inst = -1;
7469         aconnector->pack_sdp_v1_3 = false;
7470         aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7471         memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
7472         mutex_init(&aconnector->hpd_lock);
7473         mutex_init(&aconnector->handle_mst_msg_ready);
7474
7475         /*
7476          * Configure HPD hot plug support. connector->polled defaults to 0,
7477          * which means HPD hot plug is not supported.
7478          */
7479         switch (connector_type) {
7480         case DRM_MODE_CONNECTOR_HDMIA:
7481                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7482                 aconnector->base.ycbcr_420_allowed =
7483                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7484                 break;
7485         case DRM_MODE_CONNECTOR_DisplayPort:
7486                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7487                 link->link_enc = link_enc_cfg_get_link_enc(link);
7488                 ASSERT(link->link_enc);
7489                 if (link->link_enc)
7490                         aconnector->base.ycbcr_420_allowed =
7491                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7492                 break;
7493         case DRM_MODE_CONNECTOR_DVID:
7494                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7495                 break;
7496         default:
7497                 break;
7498         }
7499
7500         drm_object_attach_property(&aconnector->base.base,
7501                                 dm->ddev->mode_config.scaling_mode_property,
7502                                 DRM_MODE_SCALE_NONE);
7503
7504         drm_object_attach_property(&aconnector->base.base,
7505                                 adev->mode_info.underscan_property,
7506                                 UNDERSCAN_OFF);
7507         drm_object_attach_property(&aconnector->base.base,
7508                                 adev->mode_info.underscan_hborder_property,
7509                                 0);
7510         drm_object_attach_property(&aconnector->base.base,
7511                                 adev->mode_info.underscan_vborder_property,
7512                                 0);
7513
7514         if (!aconnector->mst_root)
7515                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7516
7517         aconnector->base.state->max_bpc = 16;
7518         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7519
7520         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7521             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7522                 drm_object_attach_property(&aconnector->base.base,
7523                                 adev->mode_info.abm_level_property, 0);
7524         }
7525
7526         if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7527                 /* Content Type is currently only implemented for HDMI. */
7528                 drm_connector_attach_content_type_property(&aconnector->base);
7529         }
7530
7531         if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7532                 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
7533                         drm_connector_attach_colorspace_property(&aconnector->base);
7534         } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
7535                    connector_type == DRM_MODE_CONNECTOR_eDP) {
7536                 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
7537                         drm_connector_attach_colorspace_property(&aconnector->base);
7538         }
7539
7540         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7541             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7542             connector_type == DRM_MODE_CONNECTOR_eDP) {
7543                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7544
7545                 if (!aconnector->mst_root)
7546                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7547
7548                 if (adev->dm.hdcp_workqueue)
7549                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7550         }
7551 }
7552
7553 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7554                               struct i2c_msg *msgs, int num)
7555 {
7556         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7557         struct ddc_service *ddc_service = i2c->ddc_service;
7558         struct i2c_command cmd;
7559         int i;
7560         int result = -EIO;
7561
7562         if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
7563                 return result;
7564
7565         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7566
7567         if (!cmd.payloads)
7568                 return result;
7569
7570         cmd.number_of_payloads = num;
7571         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7572         cmd.speed = 100;
7573
7574         for (i = 0; i < num; i++) {
7575                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7576                 cmd.payloads[i].address = msgs[i].addr;
7577                 cmd.payloads[i].length = msgs[i].len;
7578                 cmd.payloads[i].data = msgs[i].buf;
7579         }
7580
7581         if (dc_submit_i2c(
7582                         ddc_service->ctx->dc,
7583                         ddc_service->link->link_index,
7584                         &cmd))
7585                 result = num;
7586
7587         kfree(cmd.payloads);
7588         return result;
7589 }
7590
7591 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7592 {
7593         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7594 }
7595
7596 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7597         .master_xfer = amdgpu_dm_i2c_xfer,
7598         .functionality = amdgpu_dm_i2c_func,
7599 };
7600
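
/*
 * Wrap a DC DDC service in a Linux i2c adapter so that standard i2c
 * transfers (EDID reads and the like) are routed through DC's hardware
 * i2c engine via amdgpu_dm_i2c_xfer().
 */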
7601 static struct amdgpu_i2c_adapter *
7602 create_i2c(struct ddc_service *ddc_service,
7603            int link_index,
7604            int *res)
7605 {
7606         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7607         struct amdgpu_i2c_adapter *i2c;
7608
7609         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7610         if (!i2c)
7611                 return NULL;
7612         i2c->base.owner = THIS_MODULE;
7613         i2c->base.class = I2C_CLASS_DDC;
7614         i2c->base.dev.parent = &adev->pdev->dev;
7615         i2c->base.algo = &amdgpu_dm_i2c_algo;
7616         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7617         i2c_set_adapdata(&i2c->base, i2c);
7618         i2c->ddc_service = ddc_service;
7619
7620         return i2c;
7621 }
7622
7623
7624 /*
7625  * Note: this function assumes that dc_link_detect() was called for the
7626  * dc_link which will be represented by this aconnector.
7627  */
7628 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7629                                     struct amdgpu_dm_connector *aconnector,
7630                                     u32 link_index,
7631                                     struct amdgpu_encoder *aencoder)
7632 {
7633         int res = 0;
7634         int connector_type;
7635         struct dc *dc = dm->dc;
7636         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7637         struct amdgpu_i2c_adapter *i2c;
7638
7639         /* Not needed for writeback connector */
7640         link->priv = aconnector;
7641
7642
7643         i2c = create_i2c(link->ddc, link->link_index, &res);
7644         if (!i2c) {
7645                 DRM_ERROR("Failed to create i2c adapter data\n");
7646                 return -ENOMEM;
7647         }
7648
7649         aconnector->i2c = i2c;
7650         res = i2c_add_adapter(&i2c->base);
7651
7652         if (res) {
7653                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7654                 goto out_free;
7655         }
7656
7657         connector_type = to_drm_connector_type(link->connector_signal);
7658
7659         res = drm_connector_init_with_ddc(
7660                         dm->ddev,
7661                         &aconnector->base,
7662                         &amdgpu_dm_connector_funcs,
7663                         connector_type,
7664                         &i2c->base);
7665
7666         if (res) {
7667                 DRM_ERROR("connector_init failed\n");
7668                 aconnector->connector_id = -1;
7669                 goto out_free;
7670         }
7671
7672         drm_connector_helper_add(
7673                         &aconnector->base,
7674                         &amdgpu_dm_connector_helper_funcs);
7675
7676         amdgpu_dm_connector_init_helper(
7677                 dm,
7678                 aconnector,
7679                 connector_type,
7680                 link,
7681                 link_index);
7682
7683         drm_connector_attach_encoder(
7684                 &aconnector->base, &aencoder->base);
7685
7686         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7687                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7688                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7689
7690 out_free:
7691         if (res) {
7692                 kfree(i2c);
7693                 aconnector->i2c = NULL;
7694         }
7695         return res;
7696 }
7697
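/*
 * Return a bitmask with one bit set per CRTC (e.g. 4 CRTCs -> 0xf), used as
 * the encoder's possible_crtcs mask; capped at 6 CRTCs (0x3f).
 */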
7698 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7699 {
7700         switch (adev->mode_info.num_crtc) {
7701         case 1:
7702                 return 0x1;
7703         case 2:
7704                 return 0x3;
7705         case 3:
7706                 return 0x7;
7707         case 4:
7708                 return 0xf;
7709         case 5:
7710                 return 0x1f;
7711         case 6:
7712         default:
7713                 return 0x3f;
7714         }
7715 }
7716
7717 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7718                                   struct amdgpu_encoder *aencoder,
7719                                   uint32_t link_index)
7720 {
7721         struct amdgpu_device *adev = drm_to_adev(dev);
7722
7723         int res = drm_encoder_init(dev,
7724                                    &aencoder->base,
7725                                    &amdgpu_dm_encoder_funcs,
7726                                    DRM_MODE_ENCODER_TMDS,
7727                                    NULL);
7728
7729         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7730
7731         if (!res)
7732                 aencoder->encoder_id = link_index;
7733         else
7734                 aencoder->encoder_id = -1;
7735
7736         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7737
7738         return res;
7739 }
7740
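/*
 * Enable or disable the CRTC-related interrupts for one CRTC: vblank via
 * the DRM helpers, the pageflip interrupt, and the secure-display vline0
 * interrupt when CONFIG_DRM_AMD_SECURE_DISPLAY is set.
 */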
7741 static void manage_dm_interrupts(struct amdgpu_device *adev,
7742                                  struct amdgpu_crtc *acrtc,
7743                                  bool enable)
7744 {
7745         /*
7746          * We have no guarantee that the frontend index maps to the same
7747          * backend index - some even map to more than one.
7748          *
7749          * TODO: Use a different interrupt or check DC itself for the mapping.
7750          */
7751         int irq_type =
7752                 amdgpu_display_crtc_idx_to_irq_type(
7753                         adev,
7754                         acrtc->crtc_id);
7755
7756         if (enable) {
7757                 drm_crtc_vblank_on(&acrtc->base);
7758                 amdgpu_irq_get(
7759                         adev,
7760                         &adev->pageflip_irq,
7761                         irq_type);
7762 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7763                 amdgpu_irq_get(
7764                         adev,
7765                         &adev->vline0_irq,
7766                         irq_type);
7767 #endif
7768         } else {
7769 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7770                 amdgpu_irq_put(
7771                         adev,
7772                         &adev->vline0_irq,
7773                         irq_type);
7774 #endif
7775                 amdgpu_irq_put(
7776                         adev,
7777                         &adev->pageflip_irq,
7778                         irq_type);
7779                 drm_crtc_vblank_off(&acrtc->base);
7780         }
7781 }
7782
7783 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7784                                       struct amdgpu_crtc *acrtc)
7785 {
7786         int irq_type =
7787                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7788
7789         /*
7790          * This reads the current state of the IRQ and forcibly reapplies
7791          * the setting to hardware.
7792          */
7793         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7794 }
7795
7796 static bool
7797 is_scaling_state_different(const struct dm_connector_state *dm_state,
7798                            const struct dm_connector_state *old_dm_state)
7799 {
7800         if (dm_state->scaling != old_dm_state->scaling)
7801                 return true;
7802         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7803                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7804                         return true;
7805         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7806                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7807                         return true;
7808         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7809                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7810                 return true;
7811         return false;
7812 }
7813
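/*
 * Decide whether the HDCP state machine needs to be (re)run for this
 * connector, based on the content_protection property transition and the
 * CRTC state. Returning true triggers an HDCP update; several special
 * cases (S3 resume, hotplug, stream re-enable) are handled explicitly and
 * traced via the [HDCP_DM] debug messages.
 */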
7814 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7815                                             struct drm_crtc_state *old_crtc_state,
7816                                             struct drm_connector_state *new_conn_state,
7817                                             struct drm_connector_state *old_conn_state,
7818                                             const struct drm_connector *connector,
7819                                             struct hdcp_workqueue *hdcp_w)
7820 {
7821         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7822         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7823
7824         pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7825                 connector->index, connector->status, connector->dpms);
7826         pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7827                 old_conn_state->content_protection, new_conn_state->content_protection);
7828
7829         if (old_crtc_state)
7830                 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7831                 old_crtc_state->enable,
7832                 old_crtc_state->active,
7833                 old_crtc_state->mode_changed,
7834                 old_crtc_state->active_changed,
7835                 old_crtc_state->connectors_changed);
7836
7837         if (new_crtc_state)
7838                 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7839                 new_crtc_state->enable,
7840                 new_crtc_state->active,
7841                 new_crtc_state->mode_changed,
7842                 new_crtc_state->active_changed,
7843                 new_crtc_state->connectors_changed);
7844
7845         /* hdcp content type change */
7846         if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7847             new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7848                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7849                 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
7850                 return true;
7851         }
7852
7853         /* CP is being re-enabled; ignore this. */
7854         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7855             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7856                 if (new_crtc_state && new_crtc_state->mode_changed) {
7857                         new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7858                         pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7859                         return true;
7860                 }
7861                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7862                 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
7863                 return false;
7864         }
7865
7866         /* S3 resume case: the old state will always be 0 (UNDESIRED)
7867          * and the restored state will be ENABLED.
7868          * Handles:     UNDESIRED -> ENABLED
7869          */
7870         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7871             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7872                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7873
7874         /* Stream removed and re-enabled
7875          *
7876          * Can sometimes overlap with the HPD case,
7877          * thus set update_hdcp to false to avoid
7878          * setting HDCP multiple times.
7879          *
7880          * Handles:     DESIRED -> DESIRED (Special case)
7881          */
7882         if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7883                 new_conn_state->crtc && new_conn_state->crtc->enabled &&
7884                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7885                 dm_con_state->update_hdcp = false;
7886                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7887                         __func__);
7888                 return true;
7889         }
7890
7891         /* Hot-plug, headless s3, dpms
7892          *
7893          * Only start HDCP if the display is connected/enabled.
7894          * update_hdcp flag will be set to false until the next
7895          * HPD comes in.
7896          *
7897          * Handles:     DESIRED -> DESIRED (Special case)
7898          */
7899         if (dm_con_state->update_hdcp &&
7900         new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7901         connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7902                 dm_con_state->update_hdcp = false;
7903                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7904                         __func__);
7905                 return true;
7906         }
7907
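             /* No protection-state transition: HDCP only needs to be re-run
              * when a modeset forces the stream to be torn down and rebuilt.
              */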
7908         if (old_conn_state->content_protection == new_conn_state->content_protection) {
7909                 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7910                         if (new_crtc_state && new_crtc_state->mode_changed) {
7911                                 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7912                                         __func__);
7913                                 return true;
7914                         }
7915                         pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7916                                 __func__);
7917                         return false;
7918                 }
7919
7920                 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
7921                 return false;
7922         }
7923
7924         if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7925                 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7926                         __func__);
7927                 return true;
7928         }
7929
7930         pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
7931         return false;
7932 }
7933
7934 static void remove_stream(struct amdgpu_device *adev,
7935                           struct amdgpu_crtc *acrtc,
7936                           struct dc_stream_state *stream)
7937 {
7938         /* update-mode case: detach the CRTC from its stream and OTG */
7939
7940         acrtc->otg_inst = -1;
7941         acrtc->enabled = false;
7942 }
7943
7944 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7945 {
7947         assert_spin_locked(&acrtc->base.dev->event_lock);
7948         WARN_ON(acrtc->event);
7949
7950         acrtc->event = acrtc->base.state->event;
7951
7952         /* Set the flip status */
7953         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7954
7955         /* Mark this event as consumed */
7956         acrtc->base.state->event = NULL;
7957
7958         drm_dbg_state(acrtc->base.dev,
7959                       "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7960                       acrtc->crtc_id);
7961 }
7962
7963 static void update_freesync_state_on_stream(
7964         struct amdgpu_display_manager *dm,
7965         struct dm_crtc_state *new_crtc_state,
7966         struct dc_stream_state *new_stream,
7967         struct dc_plane_state *surface,
7968         u32 flip_timestamp_in_us)
7969 {
7970         struct mod_vrr_params vrr_params;
7971         struct dc_info_packet vrr_infopacket = {0};
7972         struct amdgpu_device *adev = dm->adev;
7973         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7974         unsigned long flags;
7975         bool pack_sdp_v1_3 = false;
7976         struct amdgpu_dm_connector *aconn;
7977         enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
7978
7979         if (!new_stream)
7980                 return;
7981
7982         /*
7983          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7984          * For now it's sufficient to just guard against these conditions.
7985          */
7986
7987         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7988                 return;
7989
7990         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7991         vrr_params = acrtc->dm_irq_params.vrr_params;
7992
7993         if (surface) {
7994                 mod_freesync_handle_preflip(
7995                         dm->freesync_module,
7996                         surface,
7997                         new_stream,
7998                         flip_timestamp_in_us,
7999                         &vrr_params);
8000
8001                 if (adev->family < AMDGPU_FAMILY_AI &&
8002                     amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
8003                         mod_freesync_handle_v_update(dm->freesync_module,
8004                                                      new_stream, &vrr_params);
8005
8006                         /* Need to call this before the frame ends. */
8007                         dc_stream_adjust_vmin_vmax(dm->dc,
8008                                                    new_crtc_state->stream,
8009                                                    &vrr_params.adjust);
8010                 }
8011         }
8012
8013         aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
8014
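             /* For PCON sinks on the FreeSync whitelist, or Replay-capable
              * sinks, pick the FreeSync SDP version from the sink's AMD VSDB
              * and build the adaptive-sync infopacket to match.
              */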
8015         if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
8016                 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
8017
8018                 if (aconn->vsdb_info.amd_vsdb_version == 1)
8019                         packet_type = PACKET_TYPE_FS_V1;
8020                 else if (aconn->vsdb_info.amd_vsdb_version == 2)
8021                         packet_type = PACKET_TYPE_FS_V2;
8022                 else if (aconn->vsdb_info.amd_vsdb_version == 3)
8023                         packet_type = PACKET_TYPE_FS_V3;
8024
8025                 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
8026                                         &new_stream->adaptive_sync_infopacket);
8027         }
8028
8029         mod_freesync_build_vrr_infopacket(
8030                 dm->freesync_module,
8031                 new_stream,
8032                 &vrr_params,
8033                 packet_type,
8034                 TRANSFER_FUNC_UNKNOWN,
8035                 &vrr_infopacket,
8036                 pack_sdp_v1_3);
8037
8038         new_crtc_state->freesync_vrr_info_changed |=
8039                 (memcmp(&new_crtc_state->vrr_infopacket,
8040                         &vrr_infopacket,
8041                         sizeof(vrr_infopacket)) != 0);
8042
8043         acrtc->dm_irq_params.vrr_params = vrr_params;
8044         new_crtc_state->vrr_infopacket = vrr_infopacket;
8045
8046         new_stream->vrr_infopacket = vrr_infopacket;
8047         new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
8048
8049         if (new_crtc_state->freesync_vrr_info_changed)
8050                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8051                               new_crtc_state->base.crtc->base.id,
8052                               (int)new_crtc_state->base.vrr_enabled,
8053                               (int)vrr_params.state);
8054
8055         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8056 }
8057
8058 static void update_stream_irq_parameters(
8059         struct amdgpu_display_manager *dm,
8060         struct dm_crtc_state *new_crtc_state)
8061 {
8062         struct dc_stream_state *new_stream = new_crtc_state->stream;
8063         struct mod_vrr_params vrr_params;
8064         struct mod_freesync_config config = new_crtc_state->freesync_config;
8065         struct amdgpu_device *adev = dm->adev;
8066         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8067         unsigned long flags;
8068
8069         if (!new_stream)
8070                 return;
8071
8072         /*
8073          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8074          * For now it's sufficient to just guard against these conditions.
8075          */
8076         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8077                 return;
8078
8079         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8080         vrr_params = acrtc->dm_irq_params.vrr_params;
8081
8082         if (new_crtc_state->vrr_supported &&
8083             config.min_refresh_in_uhz &&
8084             config.max_refresh_in_uhz) {
8085                 /*
8086                  * if freesync compatible mode was set, config.state will be set
8087                  * in atomic check
8088                  */
8089                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8090                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8091                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8092                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8093                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8094                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8095                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8096                 } else {
8097                         config.state = new_crtc_state->base.vrr_enabled ?
8098                                        VRR_STATE_ACTIVE_VARIABLE :
8099                                        VRR_STATE_INACTIVE;
8100                 }
8101         } else {
8102                 config.state = VRR_STATE_UNSUPPORTED;
8103         }
8104
8105         mod_freesync_build_vrr_params(dm->freesync_module,
8106                                       new_stream,
8107                                       &config, &vrr_params);
8108
8109         new_crtc_state->freesync_config = config;
8110         /* Copy state for access from DM IRQ handler */
8111         acrtc->dm_irq_params.freesync_config = config;
8112         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8113         acrtc->dm_irq_params.vrr_params = vrr_params;
8114         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8115 }
8116
8117 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8118                                             struct dm_crtc_state *new_state)
8119 {
8120         bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
8121         bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
8122
8123         if (!old_vrr_active && new_vrr_active) {
8124                 /* Transition VRR inactive -> active:
8125                  * While VRR is active, we must not disable the vblank irq, as a
8126                  * re-enable after disable would compute bogus vblank/pflip
8127                  * timestamps if the disable happened inside the display front porch.
8128                  *
8129                  * We also need vupdate irq for the actual core vblank handling
8130                  * at end of vblank.
8131                  */
8132                 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
8133                 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
8134                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8135                                  __func__, new_state->base.crtc->base.id);
8136         } else if (old_vrr_active && !new_vrr_active) {
8137                 /* Transition VRR active -> inactive:
8138                  * Allow vblank irq disable again for fixed refresh rate.
8139                  */
8140                 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
8141                 drm_crtc_vblank_put(new_state->base.crtc);
8142                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8143                                  __func__, new_state->base.crtc->base.id);
8144         }
8145 }
8146
8147 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8148 {
8149         struct drm_plane *plane;
8150         struct drm_plane_state *old_plane_state;
8151         int i;
8152
8153         /*
8154          * TODO: Make this per-stream so we don't issue redundant updates for
8155          * commits with multiple streams.
8156          */
8157         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8158                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8159                         amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
8160 }
8161
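     /* Return the TTM memory type (e.g. TTM_PL_VRAM or TTM_PL_TT) backing the
      * framebuffer's BO, or 0 if no resource is attached.
      */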
8162 static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
8163 {
8164         struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
8165
8166         return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
8167 }
8168
8169 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8170                                     struct drm_device *dev,
8171                                     struct amdgpu_display_manager *dm,
8172                                     struct drm_crtc *pcrtc,
8173                                     bool wait_for_vblank)
8174 {
8175         u32 i;
8176         u64 timestamp_ns = ktime_get_ns();
8177         struct drm_plane *plane;
8178         struct drm_plane_state *old_plane_state, *new_plane_state;
8179         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8180         struct drm_crtc_state *new_pcrtc_state =
8181                         drm_atomic_get_new_crtc_state(state, pcrtc);
8182         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8183         struct dm_crtc_state *dm_old_crtc_state =
8184                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8185         int planes_count = 0, vpos, hpos;
8186         unsigned long flags;
8187         u32 target_vblank, last_flip_vblank;
8188         bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
8189         bool cursor_update = false;
8190         bool pflip_present = false;
8191         bool dirty_rects_changed = false;
8192         struct {
8193                 struct dc_surface_update surface_updates[MAX_SURFACES];
8194                 struct dc_plane_info plane_infos[MAX_SURFACES];
8195                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8196                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8197                 struct dc_stream_update stream_update;
8198         } *bundle;
8199
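             /* The update bundle is too large to safely live on the kernel
              * stack, so allocate it per commit.
              */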
8200         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8201
8202         if (!bundle) {
8203                 drm_err(dev, "Failed to allocate update bundle\n");
8204                 goto cleanup;
8205         }
8206
8207         /*
8208          * Disable the cursor first if we're disabling all the planes.
8209          * It'll remain on the screen after the planes are re-enabled
8210          * if we don't.
8211          */
8212         if (acrtc_state->active_planes == 0)
8213                 amdgpu_dm_commit_cursors(state);
8214
8215         /* update planes when needed */
8216         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8217                 struct drm_crtc *crtc = new_plane_state->crtc;
8218                 struct drm_crtc_state *new_crtc_state;
8219                 struct drm_framebuffer *fb = new_plane_state->fb;
8220                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8221                 bool plane_needs_flip;
8222                 struct dc_plane_state *dc_plane;
8223                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8224
8225                 /* Cursor plane is handled after stream updates */
8226                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8227                         if ((fb && crtc == pcrtc) ||
8228                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8229                                 cursor_update = true;
8230
8231                         continue;
8232                 }
8233
8234                 if (!fb || !crtc || pcrtc != crtc)
8235                         continue;
8236
8237                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8238                 if (!new_crtc_state->active)
8239                         continue;
8240
8241                 dc_plane = dm_new_plane_state->dc_state;
8242                 if (!dc_plane)
8243                         continue;
8244
8245                 bundle->surface_updates[planes_count].surface = dc_plane;
8246                 if (new_pcrtc_state->color_mgmt_changed) {
8247                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8248                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8249                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8250                 }
8251
8252                 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
8253                                      &bundle->scaling_infos[planes_count]);
8254
8255                 bundle->surface_updates[planes_count].scaling_info =
8256                         &bundle->scaling_infos[planes_count];
8257
8258                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8259
8260                 pflip_present = pflip_present || plane_needs_flip;
8261
8262                 if (!plane_needs_flip) {
8263                         planes_count += 1;
8264                         continue;
8265                 }
8266
8267                 fill_dc_plane_info_and_addr(
8268                         dm->adev, new_plane_state,
8269                         afb->tiling_flags,
8270                         &bundle->plane_infos[planes_count],
8271                         &bundle->flip_addrs[planes_count].address,
8272                         afb->tmz_surface, false);
8273
8274                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
8275                                  new_plane_state->plane->index,
8276                                  bundle->plane_infos[planes_count].dcc.enable);
8277
8278                 bundle->surface_updates[planes_count].plane_info =
8279                         &bundle->plane_infos[planes_count];
8280
8281                 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
8282                     acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
8283                         fill_dc_dirty_rects(plane, old_plane_state,
8284                                             new_plane_state, new_crtc_state,
8285                                             &bundle->flip_addrs[planes_count],
8286                                             &dirty_rects_changed);
8287
8288                         /*
8289                          * If the dirty regions changed, PSR-SU needs to be disabled
8290                          * temporarily and re-enabled once the dirty regions are stable,
8291                          * to avoid video glitches. PSR-SU is re-enabled in
8292                          * vblank_control_worker() if the user pauses the video while it is disabled.
8293                          */
8294                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8295                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8296 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8297                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8298 #endif
8299                             dirty_rects_changed) {
8300                                 mutex_lock(&dm->dc_lock);
8301                                 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8302                                         timestamp_ns;
8303                                 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8304                                         amdgpu_dm_psr_disable(acrtc_state->stream);
8305                                 mutex_unlock(&dm->dc_lock);
8306                         }
8307                 }
8308
8309                 /*
8310                  * Only allow immediate flips for fast updates that don't
8311                  * change memory domain, FB pitch, DCC state, rotation or
8312                  * mirroring.
8313                  *
8314                  * dm_crtc_helper_atomic_check() only accepts async flips with
8315                  * fast updates.
8316                  */
8317                 if (crtc->state->async_flip &&
8318                     (acrtc_state->update_type != UPDATE_TYPE_FAST ||
8319                      get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
8320                         drm_warn_once(state->dev,
8321                                       "[PLANE:%d:%s] async flip with non-fast update\n",
8322                                       plane->base.id, plane->name);
8323
8324                 bundle->flip_addrs[planes_count].flip_immediate =
8325                         crtc->state->async_flip &&
8326                         acrtc_state->update_type == UPDATE_TYPE_FAST &&
8327                         get_mem_type(old_plane_state->fb) == get_mem_type(fb);
8328
8329                 timestamp_ns = ktime_get_ns();
8330                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8331                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8332                 bundle->surface_updates[planes_count].surface = dc_plane;
8333
8334                 if (!bundle->surface_updates[planes_count].surface) {
8335                         DRM_ERROR("No surface for CRTC: id=%d\n",
8336                                         acrtc_attach->crtc_id);
8337                         continue;
8338                 }
8339
8340                 if (plane == pcrtc->primary)
8341                         update_freesync_state_on_stream(
8342                                 dm,
8343                                 acrtc_state,
8344                                 acrtc_state->stream,
8345                                 dc_plane,
8346                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8347
8348                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
8349                                  __func__,
8350                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8351                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8352
8353                 planes_count += 1;
8354
8355         }
8356
8357         if (pflip_present) {
8358                 if (!vrr_active) {
8359                         /* Use old throttling in non-vrr fixed refresh rate mode
8360                          * to keep flip scheduling based on target vblank counts
8361                          * working in a backwards compatible way, e.g., for
8362                          * clients using the GLX_OML_sync_control extension or
8363                          * DRI3/Present extension with defined target_msc.
8364                          */
8365                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8366                 } else {
8367                         /* For variable refresh rate mode only:
8368                          * Get vblank of last completed flip to avoid > 1 vrr
8369                          * flips per video frame by use of throttling, but allow
8370                          * flip programming anywhere in the possibly large
8371                          * variable vrr vblank interval for fine-grained flip
8372                          * timing control and more opportunity to avoid stutter
8373                          * on late submission of flips.
8374                          */
8375                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8376                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8377                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8378                 }
8379
8380                 target_vblank = last_flip_vblank + wait_for_vblank;
8381
8382                 /*
8383                  * Wait until we're out of the vertical blank period before the one
8384                  * targeted by the flip
8385                  */
8386                 while ((acrtc_attach->enabled &&
8387                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8388                                                             0, &vpos, &hpos, NULL,
8389                                                             NULL, &pcrtc->hwmode)
8390                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8391                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8392                         (int)(target_vblank -
8393                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8394                         usleep_range(1000, 1100);
8395                 }
8396
8397                 /*
8398                  * Prepare the flip event for the pageflip interrupt to handle.
8399                  *
8400                  * This only works in the case where we've already turned on the
8401                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8402                  * from 0 -> n planes we have to skip a hardware generated event
8403                  * and rely on sending it from software.
8404                  */
8405                 if (acrtc_attach->base.state->event &&
8406                     acrtc_state->active_planes > 0) {
8407                         drm_crtc_vblank_get(pcrtc);
8408
8409                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8410
8411                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8412                         prepare_flip_isr(acrtc_attach);
8413
8414                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8415                 }
8416
8417                 if (acrtc_state->stream) {
8418                         if (acrtc_state->freesync_vrr_info_changed)
8419                                 bundle->stream_update.vrr_infopacket =
8420                                         &acrtc_state->stream->vrr_infopacket;
8421                 }
8422         } else if (cursor_update && acrtc_state->active_planes > 0 &&
8423                    acrtc_attach->base.state->event) {
8424                 drm_crtc_vblank_get(pcrtc);
8425
8426                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8427
8428                 acrtc_attach->event = acrtc_attach->base.state->event;
8429                 acrtc_attach->base.state->event = NULL;
8430
8431                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8432         }
8433
8434         /* Update the planes if changed or disable if we don't have any. */
8435         if ((planes_count || acrtc_state->active_planes == 0) &&
8436                 acrtc_state->stream) {
8437                 /*
8438                  * If PSR or idle optimizations are enabled then flush out
8439                  * any pending work before hardware programming.
8440                  */
8441                 if (dm->vblank_control_workqueue)
8442                         flush_workqueue(dm->vblank_control_workqueue);
8443
8444                 bundle->stream_update.stream = acrtc_state->stream;
8445                 if (new_pcrtc_state->mode_changed) {
8446                         bundle->stream_update.src = acrtc_state->stream->src;
8447                         bundle->stream_update.dst = acrtc_state->stream->dst;
8448                 }
8449
8450                 if (new_pcrtc_state->color_mgmt_changed) {
8451                         /*
8452                          * TODO: This isn't fully correct since we've actually
8453                          * already modified the stream in place.
8454                          */
8455                         bundle->stream_update.gamut_remap =
8456                                 &acrtc_state->stream->gamut_remap_matrix;
8457                         bundle->stream_update.output_csc_transform =
8458                                 &acrtc_state->stream->csc_color_matrix;
8459                         bundle->stream_update.out_transfer_func =
8460                                 acrtc_state->stream->out_transfer_func;
8461                 }
8462
8463                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8464                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8465                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8466
8467                 mutex_lock(&dm->dc_lock);
8468                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8469                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8470                         amdgpu_dm_psr_disable(acrtc_state->stream);
8471                 mutex_unlock(&dm->dc_lock);
8472
8473                 /*
8474                  * If FreeSync state on the stream has changed then we need to
8475                  * re-adjust the min/max bounds now that DC doesn't handle this
8476                  * as part of commit.
8477                  */
8478                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8479                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8480                         dc_stream_adjust_vmin_vmax(
8481                                 dm->dc, acrtc_state->stream,
8482                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8483                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8484                 }
8485                 mutex_lock(&dm->dc_lock);
8486                 update_planes_and_stream_adapter(dm->dc,
8487                                          acrtc_state->update_type,
8488                                          planes_count,
8489                                          acrtc_state->stream,
8490                                          &bundle->stream_update,
8491                                          bundle->surface_updates);
8492
8493                 /*
8494                  * Enable or disable the interrupts on the backend.
8495                  *
8496                  * Most pipes are put into power gating when unused.
8497                  *
8498                  * When power gating is enabled on a pipe we lose the
8499                  * interrupt enablement state when power gating is disabled.
8500                  *
8501                  * So we need to update the IRQ control state in hardware
8502                  * whenever the pipe turns on (since it could be previously
8503                  * power gated) or off (since some pipes can't be power gated
8504                  * on some ASICs).
8505                  */
8506                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8507                         dm_update_pflip_irq_state(drm_to_adev(dev),
8508                                                   acrtc_attach);
8509
8510                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8511                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8512                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8513                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8514
8515                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8516                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8517                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8518                         struct amdgpu_dm_connector *aconn =
8519                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8520
8521                         if (aconn->psr_skip_count > 0)
8522                                 aconn->psr_skip_count--;
8523
8524                         /* Allow PSR when skip count is 0. */
8525                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8526
8527                         /*
8528                          * If the sink supports PSR-SU, there is no need to rely on a
8529                          * vblank event disable request to enable PSR. PSR-SU can be
8530                          * enabled immediately once the OS demonstrates an adequate
8531                          * number of fast atomic commits (see vblank_control_worker())
8532                          * and the dirty regions have been stable for 500 ms.
8533                          */
8534                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8535                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8536 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8537                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8538 #endif
8539                             !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8540                             (timestamp_ns -
8541                             acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8542                             500000000)
8543                                 amdgpu_dm_psr_enable(acrtc_state->stream);
8544                 } else {
8545                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
8546                 }
8547
8548                 mutex_unlock(&dm->dc_lock);
8549         }
8550
8551         /*
8552          * Update cursor state *after* programming all the planes.
8553          * This avoids redundant programming in the case where we're going
8554          * to be disabling a single plane - those pipes are being disabled.
8555          */
8556         if (acrtc_state->active_planes)
8557                 amdgpu_dm_commit_cursors(state);
8558
8559 cleanup:
8560         kfree(bundle);
8561 }
8562
8563 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8564                                    struct drm_atomic_state *state)
8565 {
8566         struct amdgpu_device *adev = drm_to_adev(dev);
8567         struct amdgpu_dm_connector *aconnector;
8568         struct drm_connector *connector;
8569         struct drm_connector_state *old_con_state, *new_con_state;
8570         struct drm_crtc_state *new_crtc_state;
8571         struct dm_crtc_state *new_dm_crtc_state;
8572         const struct dc_stream_status *status;
8573         int i, inst;
8574
8575         /* Notify device removals. */
8576         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8577                 if (old_con_state->crtc != new_con_state->crtc) {
8578                         /* CRTC changes require notification. */
8579                         goto notify;
8580                 }
8581
8582                 if (!new_con_state->crtc)
8583                         continue;
8584
8585                 new_crtc_state = drm_atomic_get_new_crtc_state(
8586                         state, new_con_state->crtc);
8587
8588                 if (!new_crtc_state)
8589                         continue;
8590
8591                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8592                         continue;
8593
8594                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8595                         continue;
8596
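             /* Tear-down path: invalidate the cached audio instance and tell
              * the audio client (e.g. the HDA driver) to drop the stale ELD.
              */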
8597 notify:
8598                 aconnector = to_amdgpu_dm_connector(connector);
8599
8600                 mutex_lock(&adev->dm.audio_lock);
8601                 inst = aconnector->audio_inst;
8602                 aconnector->audio_inst = -1;
8603                 mutex_unlock(&adev->dm.audio_lock);
8604
8605                 amdgpu_dm_audio_eld_notify(adev, inst);
8606         }
8607
8608         /* Notify audio device additions. */
8609         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8610                 if (!new_con_state->crtc)
8611                         continue;
8612
8613                 new_crtc_state = drm_atomic_get_new_crtc_state(
8614                         state, new_con_state->crtc);
8615
8616                 if (!new_crtc_state)
8617                         continue;
8618
8619                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8620                         continue;
8621
8622                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8623                 if (!new_dm_crtc_state->stream)
8624                         continue;
8625
8626                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8627                 if (!status)
8628                         continue;
8629
8630                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8631                         continue;
8632
8633                 aconnector = to_amdgpu_dm_connector(connector);
8634
8635                 mutex_lock(&adev->dm.audio_lock);
8636                 inst = status->audio_inst;
8637                 aconnector->audio_inst = inst;
8638                 mutex_unlock(&adev->dm.audio_lock);
8639
8640                 amdgpu_dm_audio_eld_notify(adev, inst);
8641         }
8642 }
8643
8644 /*
8645  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8646  * @crtc_state: the DRM CRTC state
8647  * @stream_state: the DC stream state.
8648  *
8649  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8650  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8651  */
8652 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8653                                                 struct dc_stream_state *stream_state)
8654 {
8655         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8656 }
8657
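     /* Detach DWB (display writeback) instance 0 from the CRTC's stream. */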
8658 static void dm_clear_writeback(struct amdgpu_display_manager *dm,
8659                               struct dm_crtc_state *crtc_state)
8660 {
8661         dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
8662 }
8663
8664 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
8665                                         struct dc_state *dc_state)
8666 {
8667         struct drm_device *dev = state->dev;
8668         struct amdgpu_device *adev = drm_to_adev(dev);
8669         struct amdgpu_display_manager *dm = &adev->dm;
8670         struct drm_crtc *crtc;
8671         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8672         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8673         struct drm_connector_state *old_con_state;
8674         struct drm_connector *connector;
8675         bool mode_set_reset_required = false;
8676         u32 i;
8677
8678         /* Disable writeback */
8679         for_each_old_connector_in_state(state, connector, old_con_state, i) {
8680                 struct dm_connector_state *dm_old_con_state;
8681                 struct amdgpu_crtc *acrtc;
8682
8683                 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
8684                         continue;
8685
8686                 old_crtc_state = NULL;
8687
8688                 dm_old_con_state = to_dm_connector_state(old_con_state);
8689                 if (!dm_old_con_state->base.crtc)
8690                         continue;
8691
8692                 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
8693                 if (acrtc)
8694                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8695
8696                 if (!acrtc->wb_enabled)
8697                         continue;
8698
8699                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8700
8701                 dm_clear_writeback(dm, dm_old_crtc_state);
8702                 acrtc->wb_enabled = false;
8703         }
8704
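             /* Quiesce CRTCs that are being disabled or fully reconfigured:
              * stop their interrupts and drop the reference on the outgoing
              * stream.
              */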
8705         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8706                                       new_crtc_state, i) {
8707                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8708
8709                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8710
8711                 if (old_crtc_state->active &&
8712                     (!new_crtc_state->active ||
8713                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8714                         manage_dm_interrupts(adev, acrtc, false);
8715                         dc_stream_release(dm_old_crtc_state->stream);
8716                 }
8717         }
8718
8719         drm_atomic_helper_calc_timestamping_constants(state);
8720
8721         /* update changed items */
8722         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8723                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8724
8725                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8726                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8727
8728                 drm_dbg_state(state->dev,
8729                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
8730                         acrtc->crtc_id,
8731                         new_crtc_state->enable,
8732                         new_crtc_state->active,
8733                         new_crtc_state->planes_changed,
8734                         new_crtc_state->mode_changed,
8735                         new_crtc_state->active_changed,
8736                         new_crtc_state->connectors_changed);
8737
8738                 /* Disable cursor if disabling crtc */
8739                 if (old_crtc_state->active && !new_crtc_state->active) {
8740                         struct dc_cursor_position position;
8741
8742                         memset(&position, 0, sizeof(position));
8743                         mutex_lock(&dm->dc_lock);
8744                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8745                         mutex_unlock(&dm->dc_lock);
8746                 }
8747
8748                 /* Copy all transient state flags into dc state */
8749                 if (dm_new_crtc_state->stream) {
8750                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8751                                                             dm_new_crtc_state->stream);
8752                 }
8753
8754                 /* handles headless hotplug case, updating new_state and
8755                  * aconnector as needed
8756                  */
8757
8758                 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8759
8760                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8761
8762                         if (!dm_new_crtc_state->stream) {
8763                                 /*
8764                                  * This can happen when userspace notification delivery
8765                                  * is delayed: userspace tries to set a mode on a
8766                                  * display that is in fact already disconnected, so
8767                                  * dc_sink is NULL on the aconnector. We expect a
8768                                  * reset-mode commit to follow shortly.
8769                                  *
8770                                  * It can also happen when an unplug occurs while the
8771                                  * resume sequence is still completing.
8772                                  *
8773                                  * In both cases we want to pretend we still have a
8774                                  * sink, to keep the pipe running so that the hw state
8775                                  * stays consistent with the sw state.
8776                                  */
8778                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8779                                                 __func__, acrtc->base.base.id);
8780                                 continue;
8781                         }
8782
8783                         if (dm_old_crtc_state->stream)
8784                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8785
8786                         pm_runtime_get_noresume(dev->dev);
8787
8788                         acrtc->enabled = true;
8789                         acrtc->hw_mode = new_crtc_state->mode;
8790                         crtc->hwmode = new_crtc_state->mode;
8791                         mode_set_reset_required = true;
8792                 } else if (modereset_required(new_crtc_state)) {
8793                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8794                         /* i.e. reset mode */
8795                         if (dm_old_crtc_state->stream)
8796                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8797
8798                         mode_set_reset_required = true;
8799                 }
8800         } /* for_each_crtc_in_state() */
8801
8802         /* if there was a mode set or reset, disable eDP PSR */
8803         if (mode_set_reset_required) {
8804                 if (dm->vblank_control_workqueue)
8805                         flush_workqueue(dm->vblank_control_workqueue);
8806
8807                 amdgpu_dm_psr_disable_all(dm);
8808         }
8809
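             /* Hand the assembled stream set to DC; dc_commit_streams() is
              * where the hardware is actually reprogrammed for the new
              * configuration.
              */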
8810         dm_enable_per_frame_crtc_master_sync(dc_state);
8811         mutex_lock(&dm->dc_lock);
8812         WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
8813
8814         /* Allow idle optimization when vblank count is 0 for display off */
8815         if (dm->active_vblank_irq_count == 0)
8816                 dc_allow_idle_optimizations(dm->dc, true);
8817         mutex_unlock(&dm->dc_lock);
8818
8819         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8820                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8821
8822                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8823
8824                 if (dm_new_crtc_state->stream != NULL) {
8825                         const struct dc_stream_status *status =
8826                                         dc_stream_get_status(dm_new_crtc_state->stream);
8827
8828                         if (!status)
8829                                 status = dc_stream_get_status_from_state(dc_state,
8830                                                                          dm_new_crtc_state->stream);
8831                         if (!status)
8832                                 drm_err(dev,
8833                                         "got no status for stream %p on acrtc%p\n",
8834                                         dm_new_crtc_state->stream, acrtc);
8835                         else
8836                                 acrtc->otg_inst = status->primary_otg_inst;
8837                 }
8838         }
8839 }
8840
8841 static void dm_set_writeback(struct amdgpu_display_manager *dm,
8842                               struct dm_crtc_state *crtc_state,
8843                               struct drm_connector *connector,
8844                               struct drm_connector_state *new_con_state)
8845 {
8846         struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
8847         struct amdgpu_crtc *acrtc;
8848         struct dc_writeback_info *wb_info;
8849         struct pipe_ctx *pipe = NULL;
8850         struct amdgpu_framebuffer *afb;
8851         int i = 0;
8852
8853         wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
8854         if (!wb_info) {
8855                 DRM_ERROR("Failed to allocate wb_info\n");
8856                 return;
8857         }
8858
8859         acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
8860         if (!acrtc) {
8861                 DRM_ERROR("no amdgpu_crtc found\n");
                     kfree(wb_info);
8862                 return;
8863         }
8864
8865         afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
8866         if (!afb) {
8867                 DRM_ERROR("No amdgpu_framebuffer found\n");
                     kfree(wb_info);
8868                 return;
8869         }
8870
8871         for (i = 0; i < MAX_PIPES; i++) {
8872                 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
8873                         pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
8874                         break;
8875                 }
8876         }
             /* Bail out if the stream has no backing pipe; pipe->plane_state is dereferenced below. */
             if (!pipe) {
                     DRM_ERROR("no pipe found for writeback stream\n");
                     kfree(wb_info);
                     return;
             }
8877
8878         /* fill in wb_info */
8879         wb_info->wb_enabled = true;
8880
8881         wb_info->dwb_pipe_inst = 0;
8882         wb_info->dwb_params.dwbscl_black_color = 0;
8883         wb_info->dwb_params.hdr_mult = 0x1F000;
8884         wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
8885         wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
8886         wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
8887         wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
8888
8889         /* width & height from crtc */
8890         wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
8891         wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
8892         wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
8893         wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
8894
8895         wb_info->dwb_params.cnv_params.crop_en = false;
8896         wb_info->dwb_params.stereo_params.stereo_enabled = false;
8897
8898         wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
8899         wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
8900         wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
8901         wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
8902
8903         wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
8904
8905         wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
8906
8907         wb_info->dwb_params.scaler_taps.h_taps = 4;
8908         wb_info->dwb_params.scaler_taps.v_taps = 4;
8909         wb_info->dwb_params.scaler_taps.h_taps_c = 2;
8910         wb_info->dwb_params.scaler_taps.v_taps_c = 2;
8911         wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
8912
8913         wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
8914         wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
8915
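             /* Point each MCIF bounce buffer at the same writeback FB; the
              * chroma addresses are unused for the packed 32bpp ARGB output.
              */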
8916         for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
8917                 wb_info->mcif_buf_params.luma_address[i] = afb->address;
8918                 wb_info->mcif_buf_params.chroma_address[i] = 0;
8919         }
8920
8921         wb_info->mcif_buf_params.p_vmid = 1;
8922         wb_info->mcif_warmup_params.p_vmid = 1;
8923         wb_info->writeback_source_plane = pipe->plane_state;
8924
8925         dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
8926
8927         acrtc->wb_pending = true;
8928         acrtc->wb_conn = wb_conn;
8929         drm_writeback_queue_job(wb_conn, new_con_state);
8930 }
8931
8932 /**
8933  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8934  * @state: The atomic state to commit
8935  *
8936  * This will tell DC to commit the constructed DC state from atomic_check,
8937  * programming the hardware. Any failure here implies a hardware failure, since
8938  * atomic check should have filtered anything non-kosher.
8939  */
8940 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8941 {
8942         struct drm_device *dev = state->dev;
8943         struct amdgpu_device *adev = drm_to_adev(dev);
8944         struct amdgpu_display_manager *dm = &adev->dm;
8945         struct dm_atomic_state *dm_state;
8946         struct dc_state *dc_state = NULL;
8947         u32 i, j;
8948         struct drm_crtc *crtc;
8949         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8950         unsigned long flags;
8951         bool wait_for_vblank = true;
8952         struct drm_connector *connector;
8953         struct drm_connector_state *old_con_state, *new_con_state;
8954         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8955         int crtc_disable_count = 0;
8956
8957         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8958
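             /* On ASICs with IPS (idle power state) support, make sure DMCUB
              * has exited its low-power state before any modeset is programmed.
              */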
8959         if (dm->dc->caps.ips_support) {
8960                 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8961                         if (new_con_state->crtc &&
8962                                 new_con_state->crtc->state->active &&
8963                                 drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
8964                                 dc_dmub_srv_exit_low_power_state(dm->dc);
8965                                 break;
8966                         }
8967                 }
8968         }
8969
8970         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8971         drm_dp_mst_atomic_wait_for_dependencies(state);
8972
8973         dm_state = dm_atomic_get_new_state(state);
8974         if (dm_state && dm_state->context) {
8975                 dc_state = dm_state->context;
8976                 amdgpu_dm_commit_streams(state, dc_state);
8977         }
8978
8979         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8980                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8981                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8982                 struct amdgpu_dm_connector *aconnector;
8983
8984                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8985                         continue;
8986
8987                 aconnector = to_amdgpu_dm_connector(connector);
8988
8989                 if (!adev->dm.hdcp_workqueue)
8990                         continue;
8991
8992                 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
8993
8997                 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8998                         connector->index, connector->status, connector->dpms);
8999                 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
9000                         old_con_state->content_protection, new_con_state->content_protection);
9001
9002                 if (aconnector->dc_sink) {
9003                         if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
9004                                 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
9005                                 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
9006                                 aconnector->dc_sink->edid_caps.display_name);
9007                         }
9008                 }
9009
9010                 new_crtc_state = NULL;
9011                 old_crtc_state = NULL;
9012
9013                 if (acrtc) {
9014                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9015                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9016                 }
9017
9018                 if (old_crtc_state)
9019                         pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9020                         old_crtc_state->enable,
9021                         old_crtc_state->active,
9022                         old_crtc_state->mode_changed,
9023                         old_crtc_state->active_changed,
9024                         old_crtc_state->connectors_changed);
9025
9026                 if (new_crtc_state)
9027                         pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9028                         new_crtc_state->enable,
9029                         new_crtc_state->active,
9030                         new_crtc_state->mode_changed,
9031                         new_crtc_state->active_changed,
9032                         new_crtc_state->connectors_changed);
9033         }
9034
9035         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9036                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9037                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9038                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9039
9040                 if (!adev->dm.hdcp_workqueue)
9041                         continue;
9042
9043                 new_crtc_state = NULL;
9044                 old_crtc_state = NULL;
9045
9046                 if (acrtc) {
9047                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9048                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9049                 }
9050
9051                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9052
9053                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9054                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9055                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9056                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9057                         dm_new_con_state->update_hdcp = true;
9058                         continue;
9059                 }
9060
9061                 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
9062                                                                                         old_con_state, connector, adev->dm.hdcp_workqueue)) {
9063                         /* When a display is unplugged from an MST hub, the
9064                          * connector will be destroyed within dm_dp_mst_connector_destroy
9065                          * and its hdcp properties (type, undesired, desired, enabled)
9066                          * will be lost. So save the hdcp properties into hdcp_work
9067                          * within amdgpu_dm_atomic_commit_tail. If the same display is
9068                          * plugged back with the same display index, its hdcp properties
9069                          * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
9070                          */
9071
9072                         bool enable_encryption = false;
9073
9074                         if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
9075                                 enable_encryption = true;
9076
9077                         if (aconnector->dc_link && aconnector->dc_sink &&
9078                                 aconnector->dc_link->type == dc_connection_mst_branch) {
9079                                 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
9080                                 struct hdcp_workqueue *hdcp_w =
9081                                         &hdcp_work[aconnector->dc_link->link_index];
9082
9083                                 hdcp_w->hdcp_content_type[connector->index] =
9084                                         new_con_state->hdcp_content_type;
9085                                 hdcp_w->content_protection[connector->index] =
9086                                         new_con_state->content_protection;
9087                         }
9088
9089                         if (new_crtc_state && new_crtc_state->mode_changed &&
9090                                 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
9091                                 enable_encryption = true;
9092
9093                         DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
9094
9095                         hdcp_update_display(
9096                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9097                                 new_con_state->hdcp_content_type, enable_encryption);
9098                 }
9099         }
9100
9101         /* Handle connector state changes */
9102         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9103                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9104                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9105                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9106                 struct dc_surface_update *dummy_updates;
9107                 struct dc_stream_update stream_update;
9108                 struct dc_info_packet hdr_packet;
9109                 struct dc_stream_status *status = NULL;
9110                 bool abm_changed, hdr_changed, scaling_changed;
9111
9112                 memset(&stream_update, 0, sizeof(stream_update));
9113
9114                 if (acrtc) {
9115                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9116                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9117                 }
9118
9119                 /* Skip any modesets/resets */
9120                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9121                         continue;
9122
9123                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9124                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9125
9126                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9127                                                              dm_old_con_state);
9128
9129                 abm_changed = dm_new_crtc_state->abm_level !=
9130                               dm_old_crtc_state->abm_level;
9131
9132                 hdr_changed =
9133                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9134
9135                 if (!scaling_changed && !abm_changed && !hdr_changed)
9136                         continue;
9137
9138                 stream_update.stream = dm_new_crtc_state->stream;
9139                 if (scaling_changed) {
9140                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9141                                         dm_new_con_state, dm_new_crtc_state->stream);
9142
9143                         stream_update.src = dm_new_crtc_state->stream->src;
9144                         stream_update.dst = dm_new_crtc_state->stream->dst;
9145                 }
9146
9147                 if (abm_changed) {
9148                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9149
9150                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9151                 }
9152
9153                 if (hdr_changed) {
9154                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9155                         stream_update.hdr_static_metadata = &hdr_packet;
9156                 }
9157
9158                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9159
9160                 if (WARN_ON(!status))
9161                         continue;
9162
9163                 WARN_ON(!status->plane_count);
9164
9165                 /*
9166                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9167                  * Here we create an empty update on each plane.
9168                  * To fix this, DC should permit updating only stream properties.
9169                  */
9170                 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
                     if (!dummy_updates) {
                             DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
                             continue;
                     }
9171                 for (j = 0; j < status->plane_count; j++)
9172                         dummy_updates[j].surface = status->plane_states[0];
9173
9175                 mutex_lock(&dm->dc_lock);
9176                 dc_update_planes_and_stream(dm->dc,
9177                                             dummy_updates,
9178                                             status->plane_count,
9179                                             dm_new_crtc_state->stream,
9180                                             &stream_update);
9181                 mutex_unlock(&dm->dc_lock);
9182                 kfree(dummy_updates);
9183         }
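
        /*
         * A sketch of what the TODO above asks for, using a hypothetical
         * entry point (dc_update_stream_only() does not exist in DC today):
         * if DC permitted stream-only updates, the dummy surface array and
         * its allocation above could be dropped entirely.
         *
         *      mutex_lock(&dm->dc_lock);
         *      dc_update_stream_only(dm->dc, dm_new_crtc_state->stream,
         *                            &stream_update); // hypothetical API
         *      mutex_unlock(&dm->dc_lock);
         */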
9184
9185         /**
9186          * Enable interrupts for CRTCs that are newly enabled or went through
9187          * a modeset. This is intentionally deferred until after the front-end
9188          * state has been modified, so that the OTG is already on and the IRQ
9189          * handlers don't access stale or invalid state.
9190          */
9191         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9192                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9193 #ifdef CONFIG_DEBUG_FS
9194                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9195 #endif
9196                 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9197                 if (old_crtc_state->active && !new_crtc_state->active)
9198                         crtc_disable_count++;
9199
9200                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9201                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9202
9203                 /* For freesync config update on crtc state and params for irq */
9204                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9205
9206 #ifdef CONFIG_DEBUG_FS
9207                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9208                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9209                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9210 #endif
9211
9212                 if (new_crtc_state->active &&
9213                     (!old_crtc_state->active ||
9214                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9215                         dc_stream_retain(dm_new_crtc_state->stream);
9216                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9217                         manage_dm_interrupts(adev, acrtc, true);
9218                 }
9219                 /* Handle vrr on->off / off->on transitions */
9220                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
9221
9222 #ifdef CONFIG_DEBUG_FS
9223                 if (new_crtc_state->active &&
9224                     (!old_crtc_state->active ||
9225                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9226                         /**
9227                          * Frontend may have changed so reapply the CRC capture
9228                          * settings for the stream.
9229                          */
9230                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9231 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9232                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9233                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9234                                         acrtc->dm_irq_params.window_param.update_win = true;
9235
9236                                         /**
9237                                          * It takes 2 frames for HW to stably generate CRC when
9238                                          * resuming from suspend, so we set skip_frame_cnt to 2.
9239                                          */
9240                                         acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
9241                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9242                                 }
9243 #endif
9244                                 if (amdgpu_dm_crtc_configure_crc_source(
9245                                         crtc, dm_new_crtc_state, cur_crc_src))
9246                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9247                         }
9248                 }
9249 #endif
9250         }
9251
9252         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9253                 if (new_crtc_state->async_flip)
9254                         wait_for_vblank = false;
9255
9256         /* Update planes when needed, per CRTC */
9257         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9258                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9259
9260                 if (dm_new_crtc_state->stream)
9261                         amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
9262         }
9263
9264         /* Enable writeback */
9265         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9266                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9267                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9268
9269                 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
9270                         continue;
9271
9272                 if (!new_con_state->writeback_job)
9273                         continue;
9274
9275                 new_crtc_state = NULL;
9276
9277                 if (acrtc)
9278                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9279
9280                 if (!acrtc || !new_crtc_state || acrtc->wb_enabled)
9281                         continue;
9282
9283                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9284
9285                 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
9286                 acrtc->wb_enabled = true;
9287         }
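
        /*
         * A minimal sketch of the wb_enabled guard in the loop above, using
         * names from this file: writeback is programmed at most once per
         * enable, so repeated commits that still carry a writeback_job do
         * not re-arm the same job. The flag is presumably cleared again when
         * the CRTC is torn down elsewhere in the driver.
         *
         *      if (!acrtc || !new_crtc_state || acrtc->wb_enabled)
         *              continue;
         *      dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
         *      acrtc->wb_enabled = true;
         */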
9288
9289         /* Update audio instances for each connector. */
9290         amdgpu_dm_commit_audio(dev, state);
9291
9292         /* restore the backlight level */
9293         for (i = 0; i < dm->num_of_edps; i++) {
9294                 if (dm->backlight_dev[i] &&
9295                     (dm->actual_brightness[i] != dm->brightness[i]))
9296                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9297         }
9298
9299         /*
9300          * Send a vblank event for every event not handled in flip, and
9301          * mark the event as consumed for drm_atomic_helper_commit_hw_done().
9302          */
9303         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9304         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9305
9306                 if (new_crtc_state->event)
9307                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9308
9309                 new_crtc_state->event = NULL;
9310         }
9311         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9312
9313         /* Signal HW programming completion */
9314         drm_atomic_helper_commit_hw_done(state);
9315
9316         if (wait_for_vblank)
9317                 drm_atomic_helper_wait_for_flip_done(dev, state);
9318
9319         drm_atomic_helper_cleanup_planes(dev, state);
9320
9321         /* Don't free the memory if we are hitting this as part of suspend.
9322          * This way we don't free any memory during suspend; see
9323          * amdgpu_bo_free_kernel().  The memory will be freed in the first
9324          * non-suspend modeset or when the driver is torn down.
9325          */
9326         if (!adev->in_suspend) {
9327                 /* return the stolen vga memory back to VRAM */
9328                 if (!adev->mman.keep_stolen_vga_memory)
9329                         amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9330                 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9331         }
9332
9333         /*
9334          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9335          * so we can put the GPU into runtime suspend if we're not driving any
9336          * displays anymore
9337          */
9338         for (i = 0; i < crtc_disable_count; i++)
9339                 pm_runtime_put_autosuspend(dev->dev);
9340         pm_runtime_mark_last_busy(dev->dev);
9341 }
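
/*
 * A note on the runtime-PM balance in amdgpu_dm_atomic_commit_tail() above:
 * a runtime-PM reference is taken elsewhere in the driver whenever a CRTC
 * becomes active, and commit_tail drops exactly one reference per CRTC that
 * went from active to inactive in this commit (crtc_disable_count), so an
 * idle GPU can enter runtime suspend. A minimal sketch of the put side, as
 * used above:
 *
 *      for (i = 0; i < crtc_disable_count; i++)
 *              pm_runtime_put_autosuspend(dev->dev);
 *      pm_runtime_mark_last_busy(dev->dev);
 */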
9342
9343 static int dm_force_atomic_commit(struct drm_connector *connector)
9344 {
9345         int ret = 0;
9346         struct drm_device *ddev = connector->dev;
9347         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9348         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9349         struct drm_plane *plane = disconnected_acrtc->base.primary;
9350         struct drm_connector_state *conn_state;
9351         struct drm_crtc_state *crtc_state;
9352         struct drm_plane_state *plane_state;
9353
9354         if (!state)
9355                 return -ENOMEM;
9356
9357         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9358
9359         /* Construct an atomic state to restore previous display setting */
9360
9361         /*
9362          * Attach connectors to drm_atomic_state
9363          */
9364         conn_state = drm_atomic_get_connector_state(state, connector);
9365
9366         ret = PTR_ERR_OR_ZERO(conn_state);
9367         if (ret)
9368                 goto out;
9369
9370         /* Attach crtc to drm_atomic_state*/
9371         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9372
9373         ret = PTR_ERR_OR_ZERO(crtc_state);
9374         if (ret)
9375                 goto out;
9376
9377         /* force a restore */
9378         crtc_state->mode_changed = true;
9379
9380         /* Attach plane to drm_atomic_state */
9381         plane_state = drm_atomic_get_plane_state(state, plane);
9382
9383         ret = PTR_ERR_OR_ZERO(plane_state);
9384         if (ret)
9385                 goto out;
9386
9387         /* Call commit internally with the state we just constructed */
9388         ret = drm_atomic_commit(state);
9389
9390 out:
9391         drm_atomic_state_put(state);
9392         if (ret)
9393                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9394
9395         return ret;
9396 }
9397
9398 /*
9399  * This function handles all cases when set mode does not come upon hotplug.
9400  * This includes when a display is unplugged and then plugged back into the
9401  * same port, and when running without usermode desktop manager support.
9402  */
9403 void dm_restore_drm_connector_state(struct drm_device *dev,
9404                                     struct drm_connector *connector)
9405 {
9406         struct amdgpu_dm_connector *aconnector;
9407         struct amdgpu_crtc *disconnected_acrtc;
9408         struct dm_crtc_state *acrtc_state;
9409
9410         if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9411                 return;
9412
9413         aconnector = to_amdgpu_dm_connector(connector);
9414
9415         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9416                 return;
9417
9418         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9419         if (!disconnected_acrtc)
9420                 return;
9421
9422         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9423         if (!acrtc_state->stream)
9424                 return;
9425
9426         /*
9427          * If the previous sink is not released and differs from the current
9428          * one, we deduce that we cannot rely on a usermode call to turn on
9429          * the display, so we do it here.
9430          */
9431         if (acrtc_state->stream->sink != aconnector->dc_sink)
9432                 dm_force_atomic_commit(&aconnector->base);
9433 }
9434
9435 /*
9436  * Grabs all modesetting locks to serialize against any blocking commits,
9437  * and waits for completion of all non-blocking commits.
9438  */
9439 static int do_aquire_global_lock(struct drm_device *dev,
9440                                  struct drm_atomic_state *state)
9441 {
9442         struct drm_crtc *crtc;
9443         struct drm_crtc_commit *commit;
9444         long ret;
9445
9446         /*
9447          * Adding all modeset locks to acquire_ctx will
9448          * ensure that when the framework releases it, the
9449          * extra locks we are taking here will get released too.
9450          */
9451         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9452         if (ret)
9453                 return ret;
9454
9455         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9456                 spin_lock(&crtc->commit_lock);
9457                 commit = list_first_entry_or_null(&crtc->commit_list,
9458                                 struct drm_crtc_commit, commit_entry);
9459                 if (commit)
9460                         drm_crtc_commit_get(commit);
9461                 spin_unlock(&crtc->commit_lock);
9462
9463                 if (!commit)
9464                         continue;
9465
9466                 /*
9467                  * Make sure all pending HW programming completed and
9468                  * page flips done
9469                  */
9470                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9471
9472                 if (ret > 0)
9473                         ret = wait_for_completion_interruptible_timeout(
9474                                         &commit->flip_done, 10*HZ);
9475
9476                 if (ret == 0)
9477                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9478                                   crtc->base.id, crtc->name);
9479
9480                 drm_crtc_commit_put(commit);
9481         }
9482
9483         return ret < 0 ? ret : 0;
9484 }
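
/*
 * Return-value semantics relied on in do_aquire_global_lock(), per
 * wait_for_completion_interruptible_timeout(): a positive value means the
 * completion fired with that many jiffies of timeout left, 0 means the 10 s
 * timeout expired, and a negative value (-ERESTARTSYS) means a signal
 * interrupted the wait. A minimal sketch of the same pattern:
 *
 *      long ret;
 *
 *      ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10 * HZ);
 *      if (ret > 0)    // hw_done signalled, now wait for the page flip
 *              ret = wait_for_completion_interruptible_timeout(&commit->flip_done, 10 * HZ);
 *      if (ret == 0)   // timed out
 *              DRM_ERROR("timeout\n");
 *      return ret < 0 ? ret : 0;       // propagate the signal, else success
 */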
9485
9486 static void get_freesync_config_for_crtc(
9487         struct dm_crtc_state *new_crtc_state,
9488         struct dm_connector_state *new_con_state)
9489 {
9490         struct mod_freesync_config config = {0};
9491         struct amdgpu_dm_connector *aconnector;
9492         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9493         int vrefresh = drm_mode_vrefresh(mode);
9494         bool fs_vid_mode = false;
9495
9496         if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9497                 return;
9498
9499         aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
9500
9501         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9502                                         vrefresh >= aconnector->min_vfreq &&
9503                                         vrefresh <= aconnector->max_vfreq;
9504
9505         if (new_crtc_state->vrr_supported) {
9506                 new_crtc_state->stream->ignore_msa_timing_param = true;
9507                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9508
9509                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9510                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9511                 config.vsif_supported = true;
9512                 config.btr = true;
9513
9514                 if (fs_vid_mode) {
9515                         config.state = VRR_STATE_ACTIVE_FIXED;
9516                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9517                         goto out;
9518                 } else if (new_crtc_state->base.vrr_enabled) {
9519                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9520                 } else {
9521                         config.state = VRR_STATE_INACTIVE;
9522                 }
9523         }
9524 out:
9525         new_crtc_state->freesync_config = config;
9526 }
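
/*
 * Worked example for the uHz conversion in get_freesync_config_for_crtc()
 * above: a panel reporting min_vfreq = 48 and max_vfreq = 144 yields
 *
 *      config.min_refresh_in_uhz = 48 * 1000000  =  48000000 uHz (48 Hz)
 *      config.max_refresh_in_uhz = 144 * 1000000 = 144000000 uHz (144 Hz)
 *
 * and vrr_supported is set only when drm_mode_vrefresh(mode) falls inside
 * the [48, 144] Hz window.
 */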
9527
9528 static void reset_freesync_config_for_crtc(
9529         struct dm_crtc_state *new_crtc_state)
9530 {
9531         new_crtc_state->vrr_supported = false;
9532
9533         memset(&new_crtc_state->vrr_infopacket, 0,
9534                sizeof(new_crtc_state->vrr_infopacket));
9535 }
9536
9537 static bool
9538 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9539                                  struct drm_crtc_state *new_crtc_state)
9540 {
9541         const struct drm_display_mode *old_mode, *new_mode;
9542
9543         if (!old_crtc_state || !new_crtc_state)
9544                 return false;
9545
9546         old_mode = &old_crtc_state->mode;
9547         new_mode = &new_crtc_state->mode;
9548
9549         if (old_mode->clock       == new_mode->clock &&
9550             old_mode->hdisplay    == new_mode->hdisplay &&
9551             old_mode->vdisplay    == new_mode->vdisplay &&
9552             old_mode->htotal      == new_mode->htotal &&
9553             old_mode->vtotal      != new_mode->vtotal &&
9554             old_mode->hsync_start == new_mode->hsync_start &&
9555             old_mode->vsync_start != new_mode->vsync_start &&
9556             old_mode->hsync_end   == new_mode->hsync_end &&
9557             old_mode->vsync_end   != new_mode->vsync_end &&
9558             old_mode->hskew       == new_mode->hskew &&
9559             old_mode->vscan       == new_mode->vscan &&
9560             (old_mode->vsync_end - old_mode->vsync_start) ==
9561             (new_mode->vsync_end - new_mode->vsync_start))
9562                 return true;
9563
9564         return false;
9565 }
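
/*
 * Note the deliberate mix of == and != in is_timing_unchanged_for_freesync()
 * above: two modes count as unchanged when every horizontal parameter matches
 * and only the vertical front porch moved, i.e. vtotal, vsync_start and
 * vsync_end all shift together while the vsync pulse width stays the same.
 * With illustrative numbers: a 1920x1080 timing with htotal 2200 and pixel
 * clock 148500 kHz refreshes at 60 Hz with vtotal 1125 and at roughly 48 Hz
 * with vtotal 1406; since only the front porch differs, switching between
 * them needs no full modeset.
 */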
9566
9567 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9568 {
9569         u64 num, den, res;
9570         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9571
9572         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9573
9574         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9575         den = (unsigned long long)new_crtc_state->mode.htotal *
9576               (unsigned long long)new_crtc_state->mode.vtotal;
9577
9578         res = div_u64(num, den);
9579         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9580 }
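
/*
 * Worked example for the fixed-refresh math in set_freesync_fixed_config()
 * above, for 1920x1080@60 (pixel clock 148500 kHz, htotal 2200, vtotal 1125):
 *
 *      num = 148500 * 1000 * 1000000 = 148500000000000
 *      den = 2200 * 1125             = 2475000
 *      res = num / den               = 60000000 uHz (60.000 Hz)
 */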
9581
9582 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9583                          struct drm_atomic_state *state,
9584                          struct drm_crtc *crtc,
9585                          struct drm_crtc_state *old_crtc_state,
9586                          struct drm_crtc_state *new_crtc_state,
9587                          bool enable,
9588                          bool *lock_and_validation_needed)
9589 {
9590         struct dm_atomic_state *dm_state = NULL;
9591         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9592         struct dc_stream_state *new_stream;
9593         int ret = 0;
9594
9595         /*
9596          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9597          * dc_validation_set, and update only the changed items there.
9598          */
9599         struct amdgpu_crtc *acrtc = NULL;
9600         struct drm_connector *connector = NULL;
9601         struct amdgpu_dm_connector *aconnector = NULL;
9602         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9603         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9604
9605         new_stream = NULL;
9606
9607         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9608         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9609         acrtc = to_amdgpu_crtc(crtc);
9610         connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9611         if (connector)
9612                 aconnector = to_amdgpu_dm_connector(connector);
9613
9614         /* TODO This hack should go away */
9615         if (connector && enable) {
9616                 /* Make sure fake sink is created in plug-in scenario */
9617                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9618                                                                         connector);
9619                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9620                                                                         connector);
9621
9622                 if (IS_ERR(drm_new_conn_state)) {
9623                         ret = PTR_ERR(drm_new_conn_state);
9624                         goto fail;
9625                 }
9626
9627                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9628                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9629
9630                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9631                         goto skip_modeset;
9632
9633                 new_stream = create_validate_stream_for_sink(aconnector,
9634                                                              &new_crtc_state->mode,
9635                                                              dm_new_conn_state,
9636                                                              dm_old_crtc_state->stream);
9637
9638                 /*
9639                  * We can have no stream on ACTION_SET if a display
9640                  * was disconnected during S3. In this case it is not an
9641                  * error: the OS will be updated after detection and
9642                  * will do the right thing on the next atomic commit.
9643                  */
9644
9645                 if (!new_stream) {
9646                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9647                                         __func__, acrtc->base.base.id);
9648                         ret = -ENOMEM;
9649                         goto fail;
9650                 }
9651
9652                 /*
9653                  * TODO: Check VSDB bits to decide whether this should
9654                  * be enabled or not.
9655                  */
9656                 new_stream->triggered_crtc_reset.enabled =
9657                         dm->force_timing_sync;
9658
9659                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9660
9661                 ret = fill_hdr_info_packet(drm_new_conn_state,
9662                                            &new_stream->hdr_static_metadata);
9663                 if (ret)
9664                         goto fail;
9665
9666                 /*
9667                  * If we already removed the old stream from the context
9668                  * (and set the new stream to NULL) then we can't reuse
9669                  * the old stream even if the stream and scaling are unchanged.
9670                  * We'll hit the BUG_ON and black screen.
9671                  *
9672                  * TODO: Refactor this function to allow this check to work
9673                  * in all conditions.
9674                  */
9675                 if (dm_new_crtc_state->stream &&
9676                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9677                         goto skip_modeset;
9678
9679                 if (dm_new_crtc_state->stream &&
9680                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9681                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9682                         new_crtc_state->mode_changed = false;
9683                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9684                                          new_crtc_state->mode_changed);
9685                 }
9686         }
9687
9688         /* mode_changed flag may get updated above, need to check again */
9689         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9690                 goto skip_modeset;
9691
9692         drm_dbg_state(state->dev,
9693                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
9694                 acrtc->crtc_id,
9695                 new_crtc_state->enable,
9696                 new_crtc_state->active,
9697                 new_crtc_state->planes_changed,
9698                 new_crtc_state->mode_changed,
9699                 new_crtc_state->active_changed,
9700                 new_crtc_state->connectors_changed);
9701
9702         /* Remove stream for any changed/disabled CRTC */
9703         if (!enable) {
9704
9705                 if (!dm_old_crtc_state->stream)
9706                         goto skip_modeset;
9707
9708                 /* Unset freesync video if it was active before */
9709                 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9710                         dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9711                         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9712                 }
9713
9714                 /* Now check if we should set freesync video mode */
9715                 if (dm_new_crtc_state->stream &&
9716                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9717                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
9718                     is_timing_unchanged_for_freesync(new_crtc_state,
9719                                                      old_crtc_state)) {
9720                         new_crtc_state->mode_changed = false;
9721                         DRM_DEBUG_DRIVER(
9722                                 "Mode change not required for front porch change, setting mode_changed to %d\n",
9723                                 new_crtc_state->mode_changed);
9724
9725                         set_freesync_fixed_config(dm_new_crtc_state);
9726
9727                         goto skip_modeset;
9728                 } else if (aconnector &&
9729                            is_freesync_video_mode(&new_crtc_state->mode,
9730                                                   aconnector)) {
9731                         struct drm_display_mode *high_mode;
9732
9733                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
9734                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
9735                                 set_freesync_fixed_config(dm_new_crtc_state);
9736                 }
9737
9738                 ret = dm_atomic_get_state(state, &dm_state);
9739                 if (ret)
9740                         goto fail;
9741
9742                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9743                                 crtc->base.id);
9744
9745                 /* i.e. reset mode */
9746                 if (dc_remove_stream_from_ctx(
9747                                 dm->dc,
9748                                 dm_state->context,
9749                                 dm_old_crtc_state->stream) != DC_OK) {
9750                         ret = -EINVAL;
9751                         goto fail;
9752                 }
9753
9754                 dc_stream_release(dm_old_crtc_state->stream);
9755                 dm_new_crtc_state->stream = NULL;
9756
9757                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9758
9759                 *lock_and_validation_needed = true;
9760
9761         } else { /* Add stream for any updated/enabled CRTC */
9762                 /*
9763                  * Quick fix to prevent a NULL pointer dereference on new_stream
9764                  * when added MST connectors are not found in the existing
9765                  * crtc_state in chained mode. TODO: dig out the root cause.
9766                  */
9767                 if (!connector)
9768                         goto skip_modeset;
9769
9770                 if (modereset_required(new_crtc_state))
9771                         goto skip_modeset;
9772
9773                 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
9774                                      dm_old_crtc_state->stream)) {
9775
9776                         WARN_ON(dm_new_crtc_state->stream);
9777
9778                         ret = dm_atomic_get_state(state, &dm_state);
9779                         if (ret)
9780                                 goto fail;
9781
9782                         dm_new_crtc_state->stream = new_stream;
9783
9784                         dc_stream_retain(new_stream);
9785
9786                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9787                                          crtc->base.id);
9788
9789                         if (dc_add_stream_to_ctx(
9790                                         dm->dc,
9791                                         dm_state->context,
9792                                         dm_new_crtc_state->stream) != DC_OK) {
9793                                 ret = -EINVAL;
9794                                 goto fail;
9795                         }
9796
9797                         *lock_and_validation_needed = true;
9798                 }
9799         }
9800
9801 skip_modeset:
9802         /* Release extra reference */
9803         if (new_stream)
9804                 dc_stream_release(new_stream);
9805
9806         /*
9807          * We want to do dc stream updates that do not require a
9808          * full modeset below.
9809          */
9810         if (!(enable && connector && new_crtc_state->active))
9811                 return 0;
9812          * Given the above conditions, the dc state cannot be NULL because:
9813          * 1. We're in the process of enabling CRTCs (the stream was just
9814          *    added to the dc context, or is already in the context),
9815          * 2. The CRTC has a valid connector attached, and
9816          * 3. The CRTC is currently active and enabled.
9817          * 3. Is currently active and enabled.
9818          * => The dc stream state currently exists.
9819          */
9820         BUG_ON(dm_new_crtc_state->stream == NULL);
9821
9822         /* Scaling or underscan settings */
9823         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9824                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
9825                 update_stream_scaling_settings(
9826                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9827
9828         /* ABM settings */
9829         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9830
9831         /*
9832          * Color management settings. We also update color properties
9833          * when a modeset is needed, to ensure it gets reprogrammed.
9834          */
9835         if (dm_new_crtc_state->base.color_mgmt_changed ||
9836             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9837                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9838                 if (ret)
9839                         goto fail;
9840         }
9841
9842         /* Update Freesync settings. */
9843         get_freesync_config_for_crtc(dm_new_crtc_state,
9844                                      dm_new_conn_state);
9845
9846         return ret;
9847
9848 fail:
9849         if (new_stream)
9850                 dc_stream_release(new_stream);
9851         return ret;
9852 }
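
/*
 * A note on the stream refcounting in dm_update_crtc_state() above, using
 * the calls from this file: create_validate_stream_for_sink() returns a
 * stream holding one reference; if the stream is adopted, dc_stream_retain()
 * takes the long-lived reference before dc_add_stream_to_ctx(), and the
 * creation reference is always dropped at skip_modeset:
 *
 *      new_stream = create_validate_stream_for_sink(...);  // ref = 1
 *      dm_new_crtc_state->stream = new_stream;
 *      dc_stream_retain(new_stream);                       // ref = 2
 *      ...
 *      dc_stream_release(new_stream);  // ref = 1, now owned by the crtc state
 */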
9853
9854 static bool should_reset_plane(struct drm_atomic_state *state,
9855                                struct drm_plane *plane,
9856                                struct drm_plane_state *old_plane_state,
9857                                struct drm_plane_state *new_plane_state)
9858 {
9859         struct drm_plane *other;
9860         struct drm_plane_state *old_other_state, *new_other_state;
9861         struct drm_crtc_state *new_crtc_state;
9862         struct amdgpu_device *adev = drm_to_adev(plane->dev);
9863         int i;
9864
9865         /*
9866          * TODO: Remove this hack for all ASICs once it is proven that
9867          * fast updates work fine on DCN3.2+.
9868          */
9869         if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
9870                 return true;
9871
9872         /* Exit early if we know that we're adding or removing the plane. */
9873         if (old_plane_state->crtc != new_plane_state->crtc)
9874                 return true;
9875
9876         /* old crtc == new_crtc == NULL, plane not in context. */
9877         if (!new_plane_state->crtc)
9878                 return false;
9879
9880         new_crtc_state =
9881                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9882
9883         if (!new_crtc_state)
9884                 return true;
9885
9886         /* CRTC Degamma changes currently require us to recreate planes. */
9887         if (new_crtc_state->color_mgmt_changed)
9888                 return true;
9889
9890         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9891                 return true;
9892
9893         /*
9894          * If there are any new primary or overlay planes being added or
9895          * removed then the z-order can potentially change. To ensure
9896          * correct z-order and pipe acquisition the current DC architecture
9897          * requires us to remove and recreate all existing planes.
9898          *
9899          * TODO: Come up with a more elegant solution for this.
9900          */
9901         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9902                 struct amdgpu_framebuffer *old_afb, *new_afb;
9903
9904                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9905                         continue;
9906
9907                 if (old_other_state->crtc != new_plane_state->crtc &&
9908                     new_other_state->crtc != new_plane_state->crtc)
9909                         continue;
9910
9911                 if (old_other_state->crtc != new_other_state->crtc)
9912                         return true;
9913
9914                 /* Src/dst size and scaling updates. */
9915                 if (old_other_state->src_w != new_other_state->src_w ||
9916                     old_other_state->src_h != new_other_state->src_h ||
9917                     old_other_state->crtc_w != new_other_state->crtc_w ||
9918                     old_other_state->crtc_h != new_other_state->crtc_h)
9919                         return true;
9920
9921                 /* Rotation / mirroring updates. */
9922                 if (old_other_state->rotation != new_other_state->rotation)
9923                         return true;
9924
9925                 /* Blending updates. */
9926                 if (old_other_state->pixel_blend_mode !=
9927                     new_other_state->pixel_blend_mode)
9928                         return true;
9929
9930                 /* Alpha updates. */
9931                 if (old_other_state->alpha != new_other_state->alpha)
9932                         return true;
9933
9934                 /* Colorspace changes. */
9935                 if (old_other_state->color_range != new_other_state->color_range ||
9936                     old_other_state->color_encoding != new_other_state->color_encoding)
9937                         return true;
9938
9939                 /* Framebuffer checks fall at the end. */
9940                 if (!old_other_state->fb || !new_other_state->fb)
9941                         continue;
9942
9943                 /* Pixel format changes can require bandwidth updates. */
9944                 if (old_other_state->fb->format != new_other_state->fb->format)
9945                         return true;
9946
9947                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9948                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9949
9950                 /* Tiling and DCC changes also require bandwidth updates. */
9951                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9952                     old_afb->base.modifier != new_afb->base.modifier)
9953                         return true;
9954         }
9955
9956         return false;
9957 }
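
/*
 * Example of the z-order rule in should_reset_plane() above: if a commit
 * enables a new overlay plane on a CRTC that already drives a primary plane,
 * the check returns true even for the untouched primary, because the
 * overlay's old_other_state->crtc differs from its new_other_state->crtc.
 * DC then removes and recreates every plane so pipes are re-acquired in the
 * correct z-order.
 */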
9958
9959 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9960                               struct drm_plane_state *new_plane_state,
9961                               struct drm_framebuffer *fb)
9962 {
9963         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9964         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9965         unsigned int pitch;
9966         bool linear;
9967
9968         if (fb->width > new_acrtc->max_cursor_width ||
9969             fb->height > new_acrtc->max_cursor_height) {
9970                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9971                                  new_plane_state->fb->width,
9972                                  new_plane_state->fb->height);
9973                 return -EINVAL;
9974         }
9975         if (new_plane_state->src_w != fb->width << 16 ||
9976             new_plane_state->src_h != fb->height << 16) {
9977                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9978                 return -EINVAL;
9979         }
9980
9981         /* Pitch in pixels */
9982         pitch = fb->pitches[0] / fb->format->cpp[0];
9983
9984         if (fb->width != pitch) {
9985                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9986                                  fb->width, pitch);
9987                 return -EINVAL;
9988         }
9989
9990         switch (pitch) {
9991         case 64:
9992         case 128:
9993         case 256:
9994                 /* FB pitch is supported by cursor plane */
9995                 break;
9996         default:
9997                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9998                 return -EINVAL;
9999         }
10000
10001         /* Core DRM takes care of checking FB modifiers, so we only need to
10002          * check tiling flags when the FB doesn't have a modifier.
10003          */
10004         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10005                 if (adev->family < AMDGPU_FAMILY_AI) {
10006                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10007                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10008                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10009                 } else {
10010                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10011                 }
10012                 if (!linear) {
10013                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10014                         return -EINVAL;
10015                 }
10016         }
10017
10018         return 0;
10019 }
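
/*
 * Worked example for the pitch checks in dm_check_cursor_fb() above: a
 * 64x64 ARGB8888 cursor FB has pitches[0] = 256 bytes and cpp[0] = 4, so
 * pitch = 256 / 4 = 64 pixels, which matches fb->width and is one of the
 * supported pitches (64, 128, 256). A tightly packed 100x100 cursor would
 * pass the width == pitch check but fail the pitch whitelist and return
 * -EINVAL.
 */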
10020
10021 static int dm_update_plane_state(struct dc *dc,
10022                                  struct drm_atomic_state *state,
10023                                  struct drm_plane *plane,
10024                                  struct drm_plane_state *old_plane_state,
10025                                  struct drm_plane_state *new_plane_state,
10026                                  bool enable,
10027                                  bool *lock_and_validation_needed,
10028                                  bool *is_top_most_overlay)
10029 {
10031         struct dm_atomic_state *dm_state = NULL;
10032         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10033         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10034         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10035         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10036         struct amdgpu_crtc *new_acrtc;
10037         bool needs_reset;
10038         int ret = 0;
10039
10041         new_plane_crtc = new_plane_state->crtc;
10042         old_plane_crtc = old_plane_state->crtc;
10043         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10044         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10045
10046         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10047                 if (!enable || !new_plane_crtc ||
10048                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10049                         return 0;
10050
10051                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10052
10053                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10054                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10055                         return -EINVAL;
10056                 }
10057
10058                 if (new_plane_state->fb) {
10059                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10060                                                  new_plane_state->fb);
10061                         if (ret)
10062                                 return ret;
10063                 }
10064
10065                 return 0;
10066         }
10067
10068         needs_reset = should_reset_plane(state, plane, old_plane_state,
10069                                          new_plane_state);
10070
10071         /* Remove any changed/removed planes */
10072         if (!enable) {
10073                 if (!needs_reset)
10074                         return 0;
10075
10076                 if (!old_plane_crtc)
10077                         return 0;
10078
10079                 old_crtc_state = drm_atomic_get_old_crtc_state(
10080                                 state, old_plane_crtc);
10081                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10082
10083                 if (!dm_old_crtc_state->stream)
10084                         return 0;
10085
10086                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10087                                 plane->base.id, old_plane_crtc->base.id);
10088
10089                 ret = dm_atomic_get_state(state, &dm_state);
10090                 if (ret)
10091                         return ret;
10092
10093                 if (!dc_remove_plane_from_context(
10094                                 dc,
10095                                 dm_old_crtc_state->stream,
10096                                 dm_old_plane_state->dc_state,
10097                                 dm_state->context)) {
10098
10099                         return -EINVAL;
10100                 }
10101
10102                 if (dm_old_plane_state->dc_state)
10103                         dc_plane_state_release(dm_old_plane_state->dc_state);
10104
10105                 dm_new_plane_state->dc_state = NULL;
10106
10107                 *lock_and_validation_needed = true;
10108
10109         } else { /* Add new planes */
10110                 struct dc_plane_state *dc_new_plane_state;
10111
10112                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10113                         return 0;
10114
10115                 if (!new_plane_crtc)
10116                         return 0;
10117
10118                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10119                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10120
10121                 if (!dm_new_crtc_state->stream)
10122                         return 0;
10123
10124                 if (!needs_reset)
10125                         return 0;
10126
10127                 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10128                 if (ret)
10129                         return ret;
10130
10131                 WARN_ON(dm_new_plane_state->dc_state);
10132
10133                 dc_new_plane_state = dc_create_plane_state(dc);
10134                 if (!dc_new_plane_state)
10135                         return -ENOMEM;
10136
10137                 /* Block top most plane from being a video plane */
10138                 /* Block the topmost plane from being a video plane */
10139                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10140                         if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
                                      dc_plane_state_release(dc_new_plane_state);
                                      return -EINVAL;
                              }
10142                         *is_top_most_overlay = false;
10143                 }
10144
10145                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10146                                  plane->base.id, new_plane_crtc->base.id);
10147
10148                 ret = fill_dc_plane_attributes(
10149                         drm_to_adev(new_plane_crtc->dev),
10150                         dc_new_plane_state,
10151                         new_plane_state,
10152                         new_crtc_state);
10153                 if (ret) {
10154                         dc_plane_state_release(dc_new_plane_state);
10155                         return ret;
10156                 }
10157
10158                 ret = dm_atomic_get_state(state, &dm_state);
10159                 if (ret) {
10160                         dc_plane_state_release(dc_new_plane_state);
10161                         return ret;
10162                 }
10163
10164                 /*
10165                  * Any atomic check errors that occur after this will
10166                  * not need a release. The plane state will be attached
10167                  * to the stream, and therefore part of the atomic
10168                  * state. It'll be released when the atomic state is
10169                  * cleaned.
10170                  */
10171                 if (!dc_add_plane_to_context(
10172                                 dc,
10173                                 dm_new_crtc_state->stream,
10174                                 dc_new_plane_state,
10175                                 dm_state->context)) {
10176
10177                         dc_plane_state_release(dc_new_plane_state);
10178                         return -EINVAL;
10179                 }
10180
10181                 dm_new_plane_state->dc_state = dc_new_plane_state;
10182
10183                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10184
10185                 /* Tell DC to do a full surface update every time there
10186                  * is a plane change. Inefficient, but works for now.
10187                  */
10188                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10189
10190                 *lock_and_validation_needed = true;
10191         }
10192
10194         return ret;
10195 }
10196
10197 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10198                                        int *src_w, int *src_h)
10199 {
10200         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10201         case DRM_MODE_ROTATE_90:
10202         case DRM_MODE_ROTATE_270:
10203                 *src_w = plane_state->src_h >> 16;
10204                 *src_h = plane_state->src_w >> 16;
10205                 break;
10206         case DRM_MODE_ROTATE_0:
10207         case DRM_MODE_ROTATE_180:
10208         default:
10209                 *src_w = plane_state->src_w >> 16;
10210                 *src_h = plane_state->src_h >> 16;
10211                 break;
10212         }
10213 }
10214
10215 static void
10216 dm_get_plane_scale(struct drm_plane_state *plane_state,
10217                    int *out_plane_scale_w, int *out_plane_scale_h)
10218 {
10219         int plane_src_w, plane_src_h;
10220
10221         dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
10222         *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
10223         *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
10224 }
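
/*
 * Worked example for dm_get_plane_scale() above: a plane scanning out a
 * 1920x1080 source (src_w/src_h are 16.16 fixed point, hence the >> 16)
 * into a 960x540 CRTC rectangle yields
 *
 *      scale_w = 960 * 1000 / 1920 = 500   // 0.5x, in thousandths
 *      scale_h = 540 * 1000 / 1080 = 500
 *
 * and a DRM_MODE_ROTATE_90 rotation swaps src_w/src_h before the division.
 */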
10225
10226 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10227                                 struct drm_crtc *crtc,
10228                                 struct drm_crtc_state *new_crtc_state)
10229 {
10230         struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
10231         struct drm_plane_state *old_plane_state, *new_plane_state;
10232         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10233         int i;
10234         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10235         bool any_relevant_change = false;
10236
10237         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10238          * cursor per pipe but it's going to inherit the scaling and
10239          * positioning from the underlying pipe. Check that the cursor
10240          * plane's scaling matches the underlying planes'.
10241          */
10242
10243         /* If no plane was enabled or changed scaling, no need to check again */
10244         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10245                 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
10246
10247                 if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
10248                         continue;
10249
10250                 if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
10251                         any_relevant_change = true;
10252                         break;
10253                 }
10254
10255                 if (new_plane_state->fb == old_plane_state->fb &&
10256                     new_plane_state->crtc_w == old_plane_state->crtc_w &&
10257                     new_plane_state->crtc_h == old_plane_state->crtc_h)
10258                         continue;
10259
10260                 dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
10261                 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
10262
10263                 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
10264                         any_relevant_change = true;
10265                         break;
10266                 }
10267         }
10268
10269         if (!any_relevant_change)
10270                 return 0;
10271
10272         new_cursor_state = drm_atomic_get_plane_state(state, cursor);
10273         if (IS_ERR(new_cursor_state))
10274                 return PTR_ERR(new_cursor_state);
10275
10276         if (!new_cursor_state->fb)
10277                 return 0;
10278
10279         dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
10280
10281         /* Need to check all enabled planes, even if this commit doesn't change
10282          * their state
10283          */
10284         i = drm_atomic_add_affected_planes(state, crtc);
10285         if (i)
10286                 return i;
10287
10288         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10289                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10290                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10291                         continue;
10292
10293                 /* Ignore disabled planes */
10294                 if (!new_underlying_state->fb)
10295                         continue;
10296
10297                 dm_get_plane_scale(new_underlying_state,
10298                                    &underlying_scale_w, &underlying_scale_h);
10299
10300                 if (cursor_scale_w != underlying_scale_w ||
10301                     cursor_scale_h != underlying_scale_h) {
10302                         drm_dbg_atomic(crtc->dev,
10303                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10304                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10305                         return -EINVAL;
10306                 }
10307
10308                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10309                 if (new_underlying_state->crtc_x <= 0 &&
10310                     new_underlying_state->crtc_y <= 0 &&
10311                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10312                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10313                         break;
10314         }
10315
10316         return 0;
10317 }
10318
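      /*
       * If a CRTC that needs a modeset drives a DSC-capable MST topology,
       * every other CRTC sharing that topology must be pulled into the atomic
       * state, since retiming one stream can redistribute DSC bandwidth
       * across all of them. The helper below finds the topology manager via
       * the first MST connector on the CRTC and delegates to the DRM MST
       * helper.
       */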
10319 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10320 {
10321         struct drm_connector *connector;
10322         struct drm_connector_state *conn_state, *old_conn_state;
10323         struct amdgpu_dm_connector *aconnector = NULL;
10324         int i;
10325
10326         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10327                 if (!conn_state->crtc)
10328                         conn_state = old_conn_state;
10329
10330                 if (conn_state->crtc != crtc)
10331                         continue;
10332
10333                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10334                         continue;
10335
10336                 aconnector = to_amdgpu_dm_connector(connector);
10337                 if (!aconnector->mst_output_port || !aconnector->mst_root)
10338                         aconnector = NULL;
10339                 else
10340                         break;
10341         }
10342
10343         if (!aconnector)
10344                 return 0;
10345
10346         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
10347 }
10348
10349 /**
10350  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10351  *
10352  * @dev: The DRM device
10353  * @state: The atomic state to commit
10354  *
10355  * Validate that the given atomic state is programmable by DC into hardware.
10356  * This involves constructing a &struct dc_state reflecting the new hardware
10357  * state we wish to commit, then querying DC to see if it is programmable. It's
10358  * important not to modify the existing DC state. Otherwise, atomic_check
10359  * may unexpectedly commit hardware changes.
10360  *
10361  * When validating the DC state, it's important that the right locks are
10362  * acquired. For a full update, which removes/adds/updates streams on one
10363  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10364  * that any such full-update commit will wait for completion of any
10365  * outstanding flips, using DRM's synchronization events.
10366  *
10367  * Note that DM adds the affected connectors for all CRTCs in state, when that
10368  * might not seem necessary. This is because DC stream creation requires the
10369  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10370  * be possible but non-trivial - a possible TODO item.
10371  *
10372  * Return: 0 on success, or a negative error code if validation failed.
10373  */
10374 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10375                                   struct drm_atomic_state *state)
10376 {
10377         struct amdgpu_device *adev = drm_to_adev(dev);
10378         struct dm_atomic_state *dm_state = NULL;
10379         struct dc *dc = adev->dm.dc;
10380         struct drm_connector *connector;
10381         struct drm_connector_state *old_con_state, *new_con_state;
10382         struct drm_crtc *crtc;
10383         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10384         struct drm_plane *plane;
10385         struct drm_plane_state *old_plane_state, *new_plane_state;
10386         enum dc_status status;
10387         int ret, i;
10388         bool lock_and_validation_needed = false;
10389         bool is_top_most_overlay = true;
10390         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10391         struct drm_dp_mst_topology_mgr *mgr;
10392         struct drm_dp_mst_topology_state *mst_state;
10393         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10394
10395         trace_amdgpu_dm_atomic_check_begin(state);
10396
10397         ret = drm_atomic_helper_check_modeset(dev, state);
10398         if (ret) {
10399                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10400                 goto fail;
10401         }
10402
10403         /* Check connector changes */
10404         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10405                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10406                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10407
10408                 /* Skip connectors that are disabled or already part of a modeset. */
10409                 if (!new_con_state->crtc)
10410                         continue;
10411
10412                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10413                 if (IS_ERR(new_crtc_state)) {
10414                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10415                         ret = PTR_ERR(new_crtc_state);
10416                         goto fail;
10417                 }
10418
10419                 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
10420                     dm_old_con_state->scaling != dm_new_con_state->scaling)
10421                         new_crtc_state->connectors_changed = true;
10422         }
10423
10424         if (dc_resource_is_dsc_encoding_supported(dc)) {
10425                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10426                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10427                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10428                                 if (ret) {
10429                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10430                                         goto fail;
10431                                 }
10432                         }
10433                 }
10434         }
10435         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10436                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10437
10438                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10439                     !new_crtc_state->color_mgmt_changed &&
10440                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10441                     !dm_old_crtc_state->dsc_force_changed)
10442                         continue;
10443
10444                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10445                 if (ret) {
10446                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10447                         goto fail;
10448                 }
10449
10450                 if (!new_crtc_state->enable)
10451                         continue;
10452
10453                 ret = drm_atomic_add_affected_connectors(state, crtc);
10454                 if (ret) {
10455                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10456                         goto fail;
10457                 }
10458
10459                 ret = drm_atomic_add_affected_planes(state, crtc);
10460                 if (ret) {
10461                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10462                         goto fail;
10463                 }
10464
10465                 if (dm_old_crtc_state->dsc_force_changed)
10466                         new_crtc_state->mode_changed = true;
10467         }
10468
10469         /*
10470          * Add all primary and overlay planes on the CRTC to the state
10471          * whenever a plane is enabled to maintain correct z-ordering
10472          * and to enable fast surface updates.
10473          */
10474         drm_for_each_crtc(crtc, dev) {
10475                 bool modified = false;
10476
10477                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10478                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10479                                 continue;
10480
10481                         if (new_plane_state->crtc == crtc ||
10482                             old_plane_state->crtc == crtc) {
10483                                 modified = true;
10484                                 break;
10485                         }
10486                 }
10487
10488                 if (!modified)
10489                         continue;
10490
10491                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10492                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10493                                 continue;
10494
10495                         new_plane_state =
10496                                 drm_atomic_get_plane_state(state, plane);
10497
10498                         if (IS_ERR(new_plane_state)) {
10499                                 ret = PTR_ERR(new_plane_state);
10500                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
10501                                 goto fail;
10502                         }
10503                 }
10504         }
10505
10506         /*
10507          * DC consults the zpos (layer_index in DC terminology) to determine the
10508          * hw plane on which to enable the hw cursor (see
10509          * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
10510          * atomic state, so call drm helper to normalize zpos.
10511          */
10512         ret = drm_atomic_normalize_zpos(dev, state);
10513         if (ret) {
10514                 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
10515                 goto fail;
10516         }
10517
10518         /* Remove existing planes if they are modified */
10519         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10520                 if (old_plane_state->fb && new_plane_state->fb &&
10521                     get_mem_type(old_plane_state->fb) !=
10522                     get_mem_type(new_plane_state->fb))
10523                         lock_and_validation_needed = true;
10524
10525                 ret = dm_update_plane_state(dc, state, plane,
10526                                             old_plane_state,
10527                                             new_plane_state,
10528                                             false,
10529                                             &lock_and_validation_needed,
10530                                             &is_top_most_overlay);
10531                 if (ret) {
10532                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10533                         goto fail;
10534                 }
10535         }
10536
10537         /* Disable all crtcs which require disable */
10538         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10539                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10540                                            old_crtc_state,
10541                                            new_crtc_state,
10542                                            false,
10543                                            &lock_and_validation_needed);
10544                 if (ret) {
10545                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10546                         goto fail;
10547                 }
10548         }
10549
10550         /* Enable all crtcs which require enable */
10551         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10552                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10553                                            old_crtc_state,
10554                                            new_crtc_state,
10555                                            true,
10556                                            &lock_and_validation_needed);
10557                 if (ret) {
10558                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10559                         goto fail;
10560                 }
10561         }
10562
10563         /* Add new/modified planes */
10564         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10565                 ret = dm_update_plane_state(dc, state, plane,
10566                                             old_plane_state,
10567                                             new_plane_state,
10568                                             true,
10569                                             &lock_and_validation_needed,
10570                                             &is_top_most_overlay);
10571                 if (ret) {
10572                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10573                         goto fail;
10574                 }
10575         }
10576
10577         if (dc_resource_is_dsc_encoding_supported(dc)) {
10578                 ret = pre_validate_dsc(state, &dm_state, vars);
10579                 if (ret != 0)
10580                         goto fail;
10581         }
10582
10583         /* Run this here since we want to validate the streams we created */
10584         ret = drm_atomic_helper_check_planes(dev, state);
10585         if (ret) {
10586                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10587                 goto fail;
10588         }
10589
10590         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10591                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10592                 if (dm_new_crtc_state->mpo_requested)
10593                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10594         }
10595
10596         /* Check cursor planes scaling */
10597         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10598                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10599                 if (ret) {
10600                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10601                         goto fail;
10602                 }
10603         }
10604
10605         if (state->legacy_cursor_update) {
10606                 /*
10607                  * This is a fast cursor update coming from the plane update
10608                  * helper, check if it can be done asynchronously for better
10609                  * performance.
10610                  */
10611                 state->async_update =
10612                         !drm_atomic_helper_async_check(dev, state);
10613
10614                 /*
10615                  * Skip the remaining global validation if this is an async
10616                  * update. Cursor updates can be done without affecting
10617                  * state or bandwidth calcs and this avoids the performance
10618                  * penalty of locking the private state object and
10619                  * allocating a new dc_state.
10620                  */
10621                 if (state->async_update)
10622                         return 0;
10623         }
10624
10625         /* Check scaling and underscan changes */
10626         /* TODO: Scaling-change validation was removed because a new stream
10627          * cannot be committed into the context w/o causing a full reset.
10628          * Need to decide how to handle this.
10629          */
10630         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10631                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10632                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10633                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10634
10635                 /* Skip any modesets/resets */
10636                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10637                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10638                         continue;
10639
10640                 /* Skip anything that is not a scaling or underscan change */
10641                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10642                         continue;
10643
10644                 lock_and_validation_needed = true;
10645         }
10646
10647         /* set the slot info for each mst_state based on the link encoding format */
10648         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10649                 struct amdgpu_dm_connector *aconnector;
10650                 struct drm_connector *connector;
10651                 struct drm_connector_list_iter iter;
10652                 u8 link_coding_cap;
10653
10654                 drm_connector_list_iter_begin(dev, &iter);
10655                 drm_for_each_connector_iter(connector, &iter) {
10656                         if (connector->index == mst_state->mgr->conn_base_id) {
10657                                 aconnector = to_amdgpu_dm_connector(connector);
10658                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10659                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10660
10661                                 break;
10662                         }
10663                 }
10664                 drm_connector_list_iter_end(&iter);
10665         }
10666
10667         /*
10668          * Streams and planes are reset when there are changes that affect
10669          * bandwidth. Anything that affects bandwidth needs to go through
10670          * DC global validation to ensure that the configuration can be applied
10671          * to hardware.
10672          *
10673          * We currently have to stall out here in atomic_check for outstanding
10674          * commits to finish in this case because our IRQ handlers reference
10675          * DRM state directly - we can end up disabling interrupts too early
10676          * if we don't.
10677          *
10678          * TODO: Remove this stall and drop DM state private objects.
10679          */
10680         if (lock_and_validation_needed) {
10681                 ret = dm_atomic_get_state(state, &dm_state);
10682                 if (ret) {
10683                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
10684                         goto fail;
10685                 }
10686
10687                 ret = do_aquire_global_lock(dev, state);
10688                 if (ret) {
10689                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
10690                         goto fail;
10691                 }
10692
10693                 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
10694                 if (ret) {
10695                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
10696                         ret = -EINVAL;
10697                         goto fail;
10698                 }
10699
10700                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10701                 if (ret) {
10702                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
10703                         goto fail;
10704                 }
10705
10706                 /*
10707                  * Perform validation of MST topology in the state:
10708                  * We need to perform MST atomic check before calling
10709                  * dc_validate_global_state(), or we risk getting stuck in
10710                  * an infinite loop that eventually hangs.
10711                  */
10712                 ret = drm_dp_mst_atomic_check(state);
10713                 if (ret) {
10714                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
10715                         goto fail;
10716                 }
10717                 status = dc_validate_global_state(dc, dm_state->context, true);
10718                 if (status != DC_OK) {
10719                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
10720                                          dc_status_to_str(status), status);
10721                         ret = -EINVAL;
10722                         goto fail;
10723                 }
10724         } else {
10725                 /*
10726                  * The commit is a fast update. Fast updates shouldn't change
10727                  * the DC context, affect global validation, and can have their
10728                  * commit work done in parallel with other commits not touching
10729                  * the same resource. If we have a new DC context as part of
10730                  * the DM atomic state from validation we need to free it and
10731                  * retain the existing one instead.
10732                  *
10733                  * Furthermore, since the DM atomic state only contains the DC
10734                  * context and can safely be annulled, we can free the state
10735                  * and clear the associated private object now to free
10736                  * some memory and avoid a possible use-after-free later.
10737                  */
10738
10739                 for (i = 0; i < state->num_private_objs; i++) {
10740                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10741
10742                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10743                                 int j = state->num_private_objs - 1;
10744
10745                                 dm_atomic_destroy_state(obj,
10746                                                 state->private_objs[i].state);
10747
10748                                 /* If i is not at the end of the array then the
10749                                  * last element needs to be moved to where i was
10750                                  * before the array can safely be truncated.
10751                                  */
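                                      /* Swap-remove example (illustrative):
                                       * with objs = {DM, A, B}, destroying
                                       * DM at i == 0 moves B into slot 0
                                       * and truncates num_private_objs
                                       * from 3 to 2.
                                       */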
10752                                 if (i != j)
10753                                         state->private_objs[i] =
10754                                                 state->private_objs[j];
10755
10756                                 state->private_objs[j].ptr = NULL;
10757                                 state->private_objs[j].state = NULL;
10758                                 state->private_objs[j].old_state = NULL;
10759                                 state->private_objs[j].new_state = NULL;
10760
10761                                 state->num_private_objs = j;
10762                                 break;
10763                         }
10764                 }
10765         }
10766
10767         /* Store the overall update type for use later in atomic check. */
10768         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10769                 struct dm_crtc_state *dm_new_crtc_state =
10770                         to_dm_crtc_state(new_crtc_state);
10771
10772                 /*
10773                  * Only allow async flips for fast updates that don't change
10774                  * the FB pitch, the DCC state, rotation, etc.
10775                  */
10776                 if (new_crtc_state->async_flip && lock_and_validation_needed) {
10777                         drm_dbg_atomic(crtc->dev,
10778                                        "[CRTC:%d:%s] async flips are only supported for fast updates\n",
10779                                        crtc->base.id, crtc->name);
10780                         ret = -EINVAL;
10781                         goto fail;
10782                 }
10783
10784                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10785                         UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
10786         }
10787
10788         /* Must be success */
10789         WARN_ON(ret);
10790
10791         trace_amdgpu_dm_atomic_check_finish(state, ret);
10792
10793         return ret;
10794
10795 fail:
10796         if (ret == -EDEADLK)
10797                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10798         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10799                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10800         else
10801                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10802
10803         trace_amdgpu_dm_atomic_check_finish(state, ret);
10804
10805         return ret;
10806 }
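      /*
       * For context: this hook is wired into DRM elsewhere in this file,
       * roughly as sketched below (illustrative; member order and the exact
       * set of callbacks may differ):
       *
       *   static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
       *           .fb_create = amdgpu_display_user_framebuffer_create,
       *           .atomic_check = amdgpu_dm_atomic_check,
       *           .atomic_commit = drm_atomic_helper_commit,
       *   };
       */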
10807
10808 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10809                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10810 {
10811         u8 dpcd_data;
10812         bool capable = false;
10813
10814         if (amdgpu_dm_connector->dc_link &&
10815                 dm_helpers_dp_read_dpcd(
10816                                 NULL,
10817                                 amdgpu_dm_connector->dc_link,
10818                                 DP_DOWN_STREAM_PORT_COUNT,
10819                                 &dpcd_data,
10820                                 sizeof(dpcd_data))) {
10821                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10822         }
10823
10824         return capable;
10825 }
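      /*
       * The DPCD probe above reads DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007);
       * its DP_MSA_TIMING_PAR_IGNORED bit advertises that the sink can
       * ignore the MSA timing parameters, a prerequisite for driving
       * variable refresh without an explicit timing update per frame.
       */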
10826
10827 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10828                 unsigned int offset,
10829                 unsigned int total_length,
10830                 u8 *data,
10831                 unsigned int length,
10832                 struct amdgpu_hdmi_vsdb_info *vsdb)
10833 {
10834         bool res;
10835         union dmub_rb_cmd cmd;
10836         struct dmub_cmd_send_edid_cea *input;
10837         struct dmub_cmd_edid_cea_output *output;
10838
10839         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10840                 return false;
10841
10842         memset(&cmd, 0, sizeof(cmd));
10843
10844         input = &cmd.edid_cea.data.input;
10845
10846         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10847         cmd.edid_cea.header.sub_type = 0;
10848         cmd.edid_cea.header.payload_bytes =
10849                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10850         input->offset = offset;
10851         input->length = length;
10852         input->cea_total_length = total_length;
10853         memcpy(input->payload, data, length);
10854
10855         res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
10856         if (!res) {
10857                 DRM_ERROR("EDID CEA parser failed\n");
10858                 return false;
10859         }
10860
10861         output = &cmd.edid_cea.data.output;
10862
10863         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10864                 if (!output->ack.success) {
10865                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
10866                                         output->ack.offset);
10867                 }
10868         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10869                 if (!output->amd_vsdb.vsdb_found)
10870                         return false;
10871
10872                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10873                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10874                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10875                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10876         } else {
10877                 DRM_WARN("Unknown EDID CEA parser results\n");
10878                 return false;
10879         }
10880
10881         return true;
10882 }
10883
10884 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10885                 u8 *edid_ext, int len,
10886                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10887 {
10888         int i;
10889
10890         /* send extension block to DMCU for parsing */
10891         for (i = 0; i < len; i += 8) {
10892                 bool res;
10893                 int offset;
10894
10895                 /* send 8 bytes at a time */
10896                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10897                         return false;
10898
10899                 if (i + 8 == len) {
10900                         /* entire EDID block sent, expect the parse result */
10901                         int version, min_rate, max_rate;
10902
10903                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10904                         if (res) {
10905                                 /* amd vsdb found */
10906                                 vsdb_info->freesync_supported = 1;
10907                                 vsdb_info->amd_vsdb_version = version;
10908                                 vsdb_info->min_refresh_rate_hz = min_rate;
10909                                 vsdb_info->max_refresh_rate_hz = max_rate;
10910                                 return true;
10911                         }
10912                         /* not amd vsdb */
10913                         return false;
10914                 }
10915
10916                 /* check for ack */
10917                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10918                 if (!res)
10919                         return false;
10920         }
10921
10922         return false;
10923 }
10924
10925 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10926                 u8 *edid_ext, int len,
10927                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10928 {
10929         int i;
10930
10931         /* send extension block to DMUB for parsing */
10932         for (i = 0; i < len; i += 8) {
10933                 /* send 8 bytes at a time */
10934                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10935                         return false;
10936         }
10937
10938         return vsdb_info->freesync_supported;
10939 }
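      /*
       * Example walk of the DMUB variant above (illustrative): a 128-byte
       * CEA extension is streamed in sixteen 8-byte chunks via
       * dm_edid_parser_send_cea(); intermediate chunks are expected to be
       * acked, and only the reply to the final chunk can carry the parsed
       * AMD VSDB result.
       */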
10940
10941 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10942                 u8 *edid_ext, int len,
10943                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10944 {
10945         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10946         bool ret;
10947
10948         mutex_lock(&adev->dm.dc_lock);
10949         if (adev->dm.dmub_srv)
10950                 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10951         else
10952                 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10953         mutex_unlock(&adev->dm.dc_lock);
10954         return ret;
10955 }
10956
10957 static bool parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10958                            struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10959 {
10960         u8 *edid_ext = NULL;
10961         int i;
10962         int j = 0;
10963
10964         if (edid == NULL || edid->extensions == 0)
10965                 return false;
10966
10967         /* Find DisplayID extension */
10968         for (i = 0; i < edid->extensions; i++) {
10969                 edid_ext = (void *)(edid + (i + 1));
10970                 if (edid_ext[0] == DISPLAYID_EXT)
10971                         break;
10972         }
10973
10974         while (j + sizeof(struct amd_vsdb_block) <= EDID_LENGTH) {
10975                 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
10976                 unsigned int ieee_id = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
10977
10978                 if (ieee_id == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
10979                     amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
10980                         vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
10981                         vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
10982                         DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
10983
10984                         return true;
10985                 }
10986                 j++;
10987         }
10988
10989         return false;
10990 }
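      /*
       * Note on the helper above: the three OUI bytes are stored
       * little-endian, so for AMD's IEEE OUI 0x00001A the block holds
       * {0x1A, 0x00, 0x00} and the reassembly is
       * (0x00 << 16) | (0x00 << 8) | 0x1A == 0x00001A. The scan also
       * assumes a DisplayID extension exists; if none is found, the last
       * extension block is searched instead.
       */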
10991
10992 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10993                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10994 {
10995         u8 *edid_ext = NULL;
10996         int i;
10997         bool valid_vsdb_found = false;
10998
10999         /*----- drm_find_cea_extension() -----*/
11000         /* No EDID or EDID extensions */
11001         if (edid == NULL || edid->extensions == 0)
11002                 return -ENODEV;
11003
11004         /* Find CEA extension */
11005         for (i = 0; i < edid->extensions; i++) {
11006                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11007                 if (edid_ext[0] == CEA_EXT)
11008                         break;
11009         }
11010
11011         if (i == edid->extensions)
11012                 return -ENODEV;
11013
11014         /*----- cea_db_offsets() -----*/
11015         if (edid_ext[0] != CEA_EXT)
11016                 return -ENODEV;
11017
11018         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11019
11020         return valid_vsdb_found ? i : -ENODEV;
11021 }
11022
11023 /**
11024  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
11025  *
11026  * @connector: Connector to query.
11027  * @edid: EDID from monitor
11028  *
11029  * Amdgpu supports FreeSync on DP and HDMI displays, and it needs to keep
11030  * track of some of the display information in the internal data struct used
11031  * by amdgpu_dm. This function determines which type of connector it is
11032  * dealing with and sets the FreeSync parameters accordingly.
11033  */
11034 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11035                                     struct edid *edid)
11036 {
11037         int i = 0;
11038         struct detailed_timing *timing;
11039         struct detailed_non_pixel *data;
11040         struct detailed_data_monitor_range *range;
11041         struct amdgpu_dm_connector *amdgpu_dm_connector =
11042                         to_amdgpu_dm_connector(connector);
11043         struct dm_connector_state *dm_con_state = NULL;
11044         struct dc_sink *sink;
11045
11046         struct amdgpu_device *adev = drm_to_adev(connector->dev);
11047         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11048         bool freesync_capable = false;
11049         enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
11050
11051         if (!connector->state) {
11052                 DRM_ERROR("%s - Connector has no state\n", __func__);
11053                 goto update;
11054         }
11055
11056         sink = amdgpu_dm_connector->dc_sink ?
11057                 amdgpu_dm_connector->dc_sink :
11058                 amdgpu_dm_connector->dc_em_sink;
11059
11060         if (!edid || !sink) {
11061                 dm_con_state = to_dm_connector_state(connector->state);
11062
11063                 amdgpu_dm_connector->min_vfreq = 0;
11064                 amdgpu_dm_connector->max_vfreq = 0;
11065                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11066                 connector->display_info.monitor_range.min_vfreq = 0;
11067                 connector->display_info.monitor_range.max_vfreq = 0;
11068                 freesync_capable = false;
11069
11070                 goto update;
11071         }
11072
11073         dm_con_state = to_dm_connector_state(connector->state);
11074
11075         if (!adev->dm.freesync_module)
11076                 goto update;
11077
11078         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11079                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11080                 bool edid_check_required = false;
11081
11082                 if (edid) {
11083                         edid_check_required = is_dp_capable_without_timing_msa(
11084                                                 adev->dm.dc,
11085                                                 amdgpu_dm_connector);
11086                 }
11087
11088                 if (edid_check_required && (edid->version > 1 ||
11089                     (edid->version == 1 && edid->revision > 1))) {
11090                         for (i = 0; i < 4; i++) {
11092                                 timing  = &edid->detailed_timings[i];
11093                                 data    = &timing->data.other_data;
11094                                 range   = &data->data.range;
11095                                 /*
11096                                  * Check if monitor has continuous frequency mode
11097                                  */
11098                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11099                                         continue;
11100                                 /*
11101                                  * Check for the range-limits-only flag. If flags == 1,
11102                                  * no additional timing information is provided.
11103                                  * Default GTF, GTF secondary curve and CVT are not
11104                                  * supported.
11105                                  */
11106                                 if (range->flags != 1)
11107                                         continue;
11108
11109                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11110                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11111                                 amdgpu_dm_connector->pixel_clock_mhz =
11112                                         range->pixel_clock_mhz * 10;
11113
11114                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11115                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11116
11117                                 break;
11118                         }
11119
11120                         if (amdgpu_dm_connector->max_vfreq -
11121                             amdgpu_dm_connector->min_vfreq > 10)
11122                                 freesync_capable = true;
11125                 }
11126                 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11127
11128                 if (vsdb_info.replay_mode) {
11129                         amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
11130                         amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
11131                         amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
11132                 }
11133
11134         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11135                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11136                 if (i >= 0 && vsdb_info.freesync_supported) {
11137                         timing  = &edid->detailed_timings[i];
11138                         data    = &timing->data.other_data;
11139
11140                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11141                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11142                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11143                                 freesync_capable = true;
11144
11145                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11146                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11147                 }
11148         }
11149
11150         as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
11151
11152         if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
11153                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11154                 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
11155
11156                         amdgpu_dm_connector->pack_sdp_v1_3 = true;
11157                         amdgpu_dm_connector->as_type = as_type;
11158                         amdgpu_dm_connector->vsdb_info = vsdb_info;
11159
11160                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11161                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11162                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11163                                 freesync_capable = true;
11164
11165                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11166                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11167                 }
11168         }
11169
11170 update:
11171         if (dm_con_state)
11172                 dm_con_state->freesync_capable = freesync_capable;
11173
11174         if (connector->vrr_capable_property)
11175                 drm_connector_set_vrr_capable_property(connector,
11176                                                        freesync_capable);
11177 }
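      /*
       * Example outcome for the function above (illustrative): an EDID range
       * descriptor advertising 48-144 Hz yields min_vfreq = 48 and
       * max_vfreq = 144; the 96 Hz span exceeds the 10 Hz threshold used
       * above, so the connector is marked freesync_capable and the DRM
       * "vrr_capable" property is set. A fixed 60-60 Hz panel stays
       * incapable.
       */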
11178
11179 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11180 {
11181         struct amdgpu_device *adev = drm_to_adev(dev);
11182         struct dc *dc = adev->dm.dc;
11183         int i;
11184
11185         mutex_lock(&adev->dm.dc_lock);
11186         if (dc->current_state) {
11187                 for (i = 0; i < dc->current_state->stream_count; ++i)
11188                         dc->current_state->streams[i]
11189                                 ->triggered_crtc_reset.enabled =
11190                                 adev->dm.force_timing_sync;
11191
11192                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11193                 dc_trigger_sync(dc, dc->current_state);
11194         }
11195         mutex_unlock(&adev->dm.dc_lock);
11196 }
11197
11198 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11199                        u32 value, const char *func_name)
11200 {
11201 #ifdef DM_CHECK_ADDR_0
11202         if (address == 0) {
11203                 drm_err(adev_to_drm(ctx->driver_context),
11204                         "invalid register write. address = 0\n");
11205                 return;
11206         }
11207 #endif
11208         cgs_write_register(ctx->cgs_device, address, value);
11209         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11210 }
11211
11212 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11213                           const char *func_name)
11214 {
11215         u32 value;
11216 #ifdef DM_CHECK_ADDR_0
11217         if (address == 0) {
11218                 drm_err(adev_to_drm(ctx->driver_context),
11219                         "invalid register read; address = 0\n");
11220                 return 0;
11221         }
11222 #endif
11223
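              /*
               * While the DMUB register helper is gathering writes for
               * offload (and not burst-writing), a register read cannot be
               * serviced coherently, so flag it and bail out below.
               */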
11224         if (ctx->dmub_srv &&
11225             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11226             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11227                 ASSERT(false);
11228                 return 0;
11229         }
11230
11231         value = cgs_read_register(ctx->cgs_device, address);
11232
11233         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11234
11235         return value;
11236 }
11237
11238 int amdgpu_dm_process_dmub_aux_transfer_sync(
11239                 struct dc_context *ctx,
11240                 unsigned int link_index,
11241                 struct aux_payload *payload,
11242                 enum aux_return_code_type *operation_result)
11243 {
11244         struct amdgpu_device *adev = ctx->driver_context;
11245         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11246         int ret = -1;
11247
11248         mutex_lock(&adev->dm.dpia_aux_lock);
11249         if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
11250                 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11251                 goto out;
11252         }
11253
11254         if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
11255                 DRM_ERROR("Timed out waiting for DMUB AUX transfer!\n");
11256                 *operation_result = AUX_RET_ERROR_TIMEOUT;
11257                 goto out;
11258         }
11259
11260         if (p_notify->result != AUX_RET_SUCCESS) {
11261                 /*
11262                  * Transient states before tunneling is enabled could
11263                  * lead to this error. We can ignore this for now.
11264                  */
11265                 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
11266                         DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
11267                                         payload->address, payload->length,
11268                                         p_notify->result);
11269                 }
11270                 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
11271                 goto out;
11272         }
11273
11275         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11276         if (!payload->write && p_notify->aux_reply.length &&
11277                         (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
11278
11279                 if (payload->length != p_notify->aux_reply.length) {
11280                         DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
11281                                  p_notify->aux_reply.length,
11282                                  payload->address, payload->length);
11283                         *operation_result = AUX_RET_ERROR_INVALID_REPLY;
11284                         goto out;
11285                 }
11286
11287                 memcpy(payload->data, p_notify->aux_reply.data,
11288                                 p_notify->aux_reply.length);
11289         }
11290
11291         /* success */
11292         ret = p_notify->aux_reply.length;
11293         *operation_result = p_notify->result;
11294 out:
11295         reinit_completion(&adev->dm.dmub_aux_transfer_done);
11296         mutex_unlock(&adev->dm.dpia_aux_lock);
11297         return ret;
11298 }
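      /*
       * The helper above turns the asynchronous DMUB AUX path into a
       * blocking call (illustrative outline): fire
       * dc_process_dmub_aux_transfer_async(), sleep on
       * dmub_aux_transfer_done for up to 10 s, then translate the
       * notification into a reply length or an aux_return_code_type error.
       */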
11299
11300 int amdgpu_dm_process_dmub_set_config_sync(
11301                 struct dc_context *ctx,
11302                 unsigned int link_index,
11303                 struct set_config_cmd_payload *payload,
11304                 enum set_config_status *operation_result)
11305 {
11306         struct amdgpu_device *adev = ctx->driver_context;
11307         bool is_cmd_complete;
11308         int ret;
11309
11310         mutex_lock(&adev->dm.dpia_aux_lock);
11311         is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
11312                         link_index, payload, adev->dm.dmub_notify);
11313
11314         if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
11315                 ret = 0;
11316                 *operation_result = adev->dm.dmub_notify->sc_status;
11317         } else {
11318                 DRM_ERROR("Timed out waiting for DMUB SET_CONFIG reply!\n");
11319                 ret = -1;
11320                 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11321         }
11322
11323         if (!is_cmd_complete)
11324                 reinit_completion(&adev->dm.dmub_aux_transfer_done);
11325         mutex_unlock(&adev->dm.dpia_aux_lock);
11326         return ret;
11327 }
11328
11329 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11330 {
11331         return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
11332 }
11333
11334 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11335 {
11336         return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
11337 }