/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
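
/*
 * Illustrative layout of the DMUB firmware blob as consumed below; a
 * sketch inferred from the offsets used in dm_dmub_hw_init() and
 * dm_dmub_sw_init(), not an authoritative format description:
 *
 *   ucode_array_offset_bytes
 *   v
 *   [ PSP header | inst_const payload | PSP footer ][ bss/data ]
 *     0x100 bytes  ^                    0x100 bytes
 *                  |<-- inst_const_bytes spans all three -->|
 *
 * The PSP header and footer are stripped when copying inst_const into
 * the DMUB framebuffer windows, hence the repeated
 * "inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES" computation.
 */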

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
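
/*
 * Rough shape of that conversion, for orientation only (simplified
 * pseudo-C, not driver code; the real entry points are the
 * amdgpu_dm_atomic_check()/amdgpu_dm_atomic_commit_tail() hooks declared
 * below):
 *
 *	atomic_commit(drm_atomic_state) {
 *		for each CRTC/plane in the DRM state:
 *			build a matching dc_stream_state / dc_plane_state;
 *		dc_commit_state(dc, context);	// hand off to DC
 *	}
 *
 * In the other direction, DC hardware interrupts are translated back
 * into DRM vblank and page-flip events (see dm_crtc_high_irq() and
 * dm_pflip_high_irq() below).
 */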

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
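
/*
 * Note: the value set above is what userspace observes as the DRM
 * "subconnector" property on DisplayPort connectors (created by
 * drm_mode_add_dp_subconnector_property()), e.g. "VGA" when a DP-to-VGA
 * dongle is attached to the link.
 */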

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vblank counter, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}
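
/*
 * Same VRR-active test as above, but evaluated against a DRM-side
 * &struct dm_crtc_state instead of the interrupt-time parameters. The
 * _irq variant exists so that interrupt handlers can avoid touching
 * atomic state.
 */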
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters; identify the device and
 *                    the pageflip IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}

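/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: the common IRQ parameters; identify the device and
 *                    the VUPDATE IRQ source
 *
 * VUPDATE fires after the end of the front-porch, so in VRR mode core
 * vblank handling (and BTR processing on pre-DCE12 ASICs) is done from
 * here rather than from dm_crtc_high_irq().
 */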
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							    struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against dc == NULL: we may get here via the error path in
	 * amdgpu_dm_init() before dc_create() has succeeded.
	 */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

1471         for (i = 0; i < 16; i++)
1472                 linear_lut[i] = 0xFFFF * i / 15;
1473
1474         params.set = 0;
1475         params.backlight_ramping_start = 0xCCCC;
1476         params.backlight_ramping_reduction = 0xCCCCCCCC;
1477         params.backlight_lut_array_size = 16;
1478         params.backlight_lut_array = linear_lut;
1479
1480         /* Minimum backlight level after ABM reduction; don't allow below 1%:
1481          * 0xFFFF * 0.01 = 0x28F (655)
1482          */
1483         params.min_abm_backlight = 0x28F;
1484
1485         /* In the case where ABM is implemented on the DMCUB,
1486          * the dmcu object will be NULL.
1487          * ABM 2.4 and up are implemented on the DMCUB.
1488          */
1489         if (dmcu)
1490                 ret = dmcu_load_iram(dmcu, params);
1491         else if (adev->dm.dc->ctx->dmub_srv)
1492                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1493
1494         if (!ret)
1495                 return -EINVAL;
1496
1497         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1498 }
1499
1500 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1501 {
1502         struct amdgpu_dm_connector *aconnector;
1503         struct drm_connector *connector;
1504         struct drm_connector_list_iter iter;
1505         struct drm_dp_mst_topology_mgr *mgr;
1506         int ret;
1507         bool need_hotplug = false;
1508
1509         drm_connector_list_iter_begin(dev, &iter);
1510         drm_for_each_connector_iter(connector, &iter) {
1511                 aconnector = to_amdgpu_dm_connector(connector);
1512                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1513                     aconnector->mst_port)
1514                         continue;
1515
1516                 mgr = &aconnector->mst_mgr;
1517
1518                 if (suspend) {
1519                         drm_dp_mst_topology_mgr_suspend(mgr);
1520                 } else {
1521                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1522                         if (ret < 0) {
1523                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1524                                 need_hotplug = true;
1525                         }
1526                 }
1527         }
1528         drm_connector_list_iter_end(&iter);
1529
1530         if (need_hotplug)
1531                 drm_kms_helper_hotplug_event(dev);
1532 }
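
/*
 * Usage note (editor's addition): s3_handle_mst() is called with
 * suspend == true from dm_suspend(), before IRQs are suspended, and with
 * suspend == false from dm_resume(), after HPD Rx IRQs have been re-enabled,
 * so that short-pulse interrupts can service MST again.
 */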
1533
1534 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1535 {
1536         struct smu_context *smu = &adev->smu;
1537         int ret = 0;
1538
1539         if (!is_support_sw_smu(adev))
1540                 return 0;
1541
1542         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1543          * depends on the Windows driver's dc implementation.
1544          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1545          * settings should be passed to smu during boot up and resume from s3.
1546          * Boot up: dc calculates the dcn watermark clock settings within
1547          * dc_create / dcn20_resource_construct,
1548          * then calls the pplib functions below to pass the settings to smu:
1549          * smu_set_watermarks_for_clock_ranges
1550          * smu_set_watermarks_table
1551          * navi10_set_watermarks_table
1552          * smu_write_watermarks_table
1553          *
1554          * For Renoir, the clock settings of the dcn watermarks are also fixed
1555          * values. dc has implemented a different flow for the Windows driver:
1556          * dc_hardware_init / dc_set_power_state
1557          * dcn10_init_hw
1558          * notify_wm_ranges
1559          * set_wm_ranges
1560          * -- Linux
1561          * smu_set_watermarks_for_clock_ranges
1562          * renoir_set_watermarks_table
1563          * smu_write_watermarks_table
1564          *
1565          * For Linux,
1566          * dc_hardware_init -> amdgpu_dm_init
1567          * dc_set_power_state --> dm_resume
1568          *
1569          * Therefore, this function applies to Navi10/12/14 but not Renoir.
1570          */
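        /*
         * Example (editor's illustration): on a Navi10 system resuming from
         * S3, dm_resume() ends by calling this function; the switch below
         * accepts CHIP_NAVI10 and the cached watermark table is pushed to
         * the SMU via smu_write_watermarks_table(smu).
         */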
1572         switch (adev->asic_type) {
1573         case CHIP_NAVI10:
1574         case CHIP_NAVI14:
1575         case CHIP_NAVI12:
1576                 break;
1577         default:
1578                 return 0;
1579         }
1580
1581         ret = smu_write_watermarks_table(smu);
1582         if (ret) {
1583                 DRM_ERROR("Failed to update WMTABLE!\n");
1584                 return ret;
1585         }
1586
1587         return 0;
1588 }
1589
1590 /**
1591  * dm_hw_init() - Initialize DC device
1592  * @handle: The base driver device containing the amdgpu_dm device.
1593  *
1594  * Initialize the &struct amdgpu_display_manager device. This involves calling
1595  * the initializers of each DM component, then populating the struct with them.
1596  *
1597  * Although the function implies hardware initialization, both hardware and
1598  * software are initialized here. Splitting them out to their relevant init
1599  * hooks is a future TODO item.
1600  *
1601  * Some notable things that are initialized here:
1602  *
1603  * - Display Core, both software and hardware
1604  * - DC modules that we need (freesync and color management)
1605  * - DRM software states
1606  * - Interrupt sources and handlers
1607  * - Vblank support
1608  * - Debug FS entries, if enabled
1609  */
1610 static int dm_hw_init(void *handle)
1611 {
1612         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613         /* Create DAL display manager */
1614         amdgpu_dm_init(adev);
1615         amdgpu_dm_hpd_init(adev);
1616
1617         return 0;
1618 }
1619
1620 /**
1621  * dm_hw_fini() - Teardown DC device
1622  * @handle: The base driver device containing the amdgpu_dm device.
1623  *
1624  * Teardown components within &struct amdgpu_display_manager that require
1625  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1626  * were loaded. Also flush IRQ workqueues and disable them.
1627  */
1628 static int dm_hw_fini(void *handle)
1629 {
1630         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1631
1632         amdgpu_dm_hpd_fini(adev);
1633
1634         amdgpu_dm_irq_fini(adev);
1635         amdgpu_dm_fini(adev);
1636         return 0;
1637 }
1638
1639
1640 static int dm_enable_vblank(struct drm_crtc *crtc);
1641 static void dm_disable_vblank(struct drm_crtc *crtc);
1642
1643 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1644                                  struct dc_state *state, bool enable)
1645 {
1646         enum dc_irq_source irq_source;
1647         struct amdgpu_crtc *acrtc;
1648         int rc = -EBUSY;
1649         int i = 0;
1650
1651         for (i = 0; i < state->stream_count; i++) {
1652                 acrtc = get_crtc_by_otg_inst(
1653                                 adev, state->stream_status[i].primary_otg_inst);
1654
1655                 if (acrtc && state->stream_status[i].plane_count != 0) {
1656                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1657                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1658                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1659                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1660                         if (rc)
1661                                 DRM_WARN("Failed to %s pflip interrupts\n",
1662                                          enable ? "enable" : "disable");
1663
1664                         if (enable) {
1665                                 rc = dm_enable_vblank(&acrtc->base);
1666                                 if (rc)
1667                                         DRM_WARN("Failed to enable vblank interrupts\n");
1668                         } else {
1669                                 dm_disable_vblank(&acrtc->base);
1670                         }
1671
1672                 }
1673         }
1674
1675 }
1676
1677 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1678 {
1679         struct dc_state *context = NULL;
1680         enum dc_status res = DC_ERROR_UNEXPECTED;
1681         int i;
1682         struct dc_stream_state *del_streams[MAX_PIPES];
1683         int del_streams_count = 0;
1684
1685         memset(del_streams, 0, sizeof(del_streams));
1686
1687         context = dc_create_state(dc);
1688         if (context == NULL)
1689                 goto context_alloc_fail;
1690
1691         dc_resource_state_copy_construct_current(dc, context);
1692
1693         /* First, remove all streams from the context */
1694         for (i = 0; i < context->stream_count; i++) {
1695                 struct dc_stream_state *stream = context->streams[i];
1696
1697                 del_streams[del_streams_count++] = stream;
1698         }
1699
1700         /* Remove all planes for removed streams and then remove the streams */
1701         for (i = 0; i < del_streams_count; i++) {
1702                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1703                         res = DC_FAIL_DETACH_SURFACES;
1704                         goto fail;
1705                 }
1706
1707                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1708                 if (res != DC_OK)
1709                         goto fail;
1710         }
1711
1712
1713         res = dc_validate_global_state(dc, context, false);
1714
1715         if (res != DC_OK) {
1716                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1717                 goto fail;
1718         }
1719
1720         res = dc_commit_state(dc, context);
1721
1722 fail:
1723         dc_release_state(context);
1724
1725 context_alloc_fail:
1726         return res;
1727 }
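
/*
 * Usage note (editor's addition): amdgpu_dm_commit_zero_streams() is called
 * from dm_suspend() during GPU reset, after the current dc_state has been
 * cached and interrupts disabled, so that an empty context is committed and
 * the hardware can be re-programmed from the cached state in dm_resume().
 */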
1728
1729 static int dm_suspend(void *handle)
1730 {
1731         struct amdgpu_device *adev = handle;
1732         struct amdgpu_display_manager *dm = &adev->dm;
1733         int ret = 0;
1734
1735         if (amdgpu_in_reset(adev)) {
1736                 mutex_lock(&dm->dc_lock);
1737                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1738
1739                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1740
1741                 amdgpu_dm_commit_zero_streams(dm->dc);
1742
1743                 amdgpu_dm_irq_suspend(adev);
1744
1745                 return ret;
1746         }
1747
1748         WARN_ON(adev->dm.cached_state);
1749         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1750
1751         s3_handle_mst(adev_to_drm(adev), true);
1752
1753         amdgpu_dm_irq_suspend(adev);
1754
1755
1756         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1757
1758         return 0;
1759 }
1760
1761 static struct amdgpu_dm_connector *
1762 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1763                                              struct drm_crtc *crtc)
1764 {
1765         uint32_t i;
1766         struct drm_connector_state *new_con_state;
1767         struct drm_connector *connector;
1768         struct drm_crtc *crtc_from_state;
1769
1770         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1771                 crtc_from_state = new_con_state->crtc;
1772
1773                 if (crtc_from_state == crtc)
1774                         return to_amdgpu_dm_connector(connector);
1775         }
1776
1777         return NULL;
1778 }
1779
1780 static void emulated_link_detect(struct dc_link *link)
1781 {
1782         struct dc_sink_init_data sink_init_data = { 0 };
1783         struct display_sink_capability sink_caps = { 0 };
1784         enum dc_edid_status edid_status;
1785         struct dc_context *dc_ctx = link->ctx;
1786         struct dc_sink *sink = NULL;
1787         struct dc_sink *prev_sink = NULL;
1788
1789         link->type = dc_connection_none;
1790         prev_sink = link->local_sink;
1791
1792         if (prev_sink != NULL)
1793                 dc_sink_retain(prev_sink);
1794
1795         switch (link->connector_signal) {
1796         case SIGNAL_TYPE_HDMI_TYPE_A: {
1797                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1798                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1799                 break;
1800         }
1801
1802         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1803                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1805                 break;
1806         }
1807
1808         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1809                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1811                 break;
1812         }
1813
1814         case SIGNAL_TYPE_LVDS: {
1815                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1817                 break;
1818         }
1819
1820         case SIGNAL_TYPE_EDP: {
1821                 sink_caps.transaction_type =
1822                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1823                 sink_caps.signal = SIGNAL_TYPE_EDP;
1824                 break;
1825         }
1826
1827         case SIGNAL_TYPE_DISPLAY_PORT: {
1828                 sink_caps.transaction_type =
1829                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1830                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1831                 break;
1832         }
1833
1834         default:
1835                 DC_ERROR("Invalid connector type! signal:%d\n",
1836                         link->connector_signal);
1837                 return;
1838         }
1839
1840         sink_init_data.link = link;
1841         sink_init_data.sink_signal = sink_caps.signal;
1842
1843         sink = dc_sink_create(&sink_init_data);
1844         if (!sink) {
1845                 DC_ERROR("Failed to create sink!\n");
1846                 return;
1847         }
1848
1849         /* dc_sink_create returns a new reference */
1850         link->local_sink = sink;
1851
1852         edid_status = dm_helpers_read_local_edid(
1853                         link->ctx,
1854                         link,
1855                         sink);
1856
1857         if (edid_status != EDID_OK)
1858                 DC_ERROR("Failed to read EDID\n");
1860 }
1861
1862 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1863                                      struct amdgpu_display_manager *dm)
1864 {
1865         struct {
1866                 struct dc_surface_update surface_updates[MAX_SURFACES];
1867                 struct dc_plane_info plane_infos[MAX_SURFACES];
1868                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1869                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1870                 struct dc_stream_update stream_update;
1871         } *bundle;
1872         int k, m;
1873
1874         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1875
1876         if (!bundle) {
1877                 dm_error("Failed to allocate update bundle\n");
1878                 goto cleanup;
1879         }
1880
1881         for (k = 0; k < dc_state->stream_count; k++) {
1882                 bundle->stream_update.stream = dc_state->streams[k];
1883
1884                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1885                         bundle->surface_updates[m].surface =
1886                                 dc_state->stream_status[k].plane_states[m];
1887                         bundle->surface_updates[m].surface->force_full_update =
1888                                 true;
1889                 }
1890                 dc_commit_updates_for_stream(
1891                         dm->dc, bundle->surface_updates,
1892                         dc_state->stream_status[k].plane_count,
1893                         dc_state->streams[k], &bundle->stream_update, dc_state);
1894         }
1895
1896 cleanup:
1897         kfree(bundle);
1898
1899         return;
1900 }
1901
1902 static int dm_resume(void *handle)
1903 {
1904         struct amdgpu_device *adev = handle;
1905         struct drm_device *ddev = adev_to_drm(adev);
1906         struct amdgpu_display_manager *dm = &adev->dm;
1907         struct amdgpu_dm_connector *aconnector;
1908         struct drm_connector *connector;
1909         struct drm_connector_list_iter iter;
1910         struct drm_crtc *crtc;
1911         struct drm_crtc_state *new_crtc_state;
1912         struct dm_crtc_state *dm_new_crtc_state;
1913         struct drm_plane *plane;
1914         struct drm_plane_state *new_plane_state;
1915         struct dm_plane_state *dm_new_plane_state;
1916         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1917         enum dc_connection_type new_connection_type = dc_connection_none;
1918         struct dc_state *dc_state;
1919         int i, r, j;
1920
1921         if (amdgpu_in_reset(adev)) {
1922                 dc_state = dm->cached_dc_state;
1923
1924                 r = dm_dmub_hw_init(adev);
1925                 if (r)
1926                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1927
1928                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1929                 dc_resume(dm->dc);
1930
1931                 amdgpu_dm_irq_resume_early(adev);
1932
1933                 for (i = 0; i < dc_state->stream_count; i++) {
1934                         dc_state->streams[i]->mode_changed = true;
1935                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1936                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1937                                         = 0xffffffff;
1938                         }
1939                 }
1940
1941                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1942
1943                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1944
1945                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1946
1947                 dc_release_state(dm->cached_dc_state);
1948                 dm->cached_dc_state = NULL;
1949
1950                 amdgpu_dm_irq_resume_late(adev);
1951
1952                 mutex_unlock(&dm->dc_lock);
1953
1954                 return 0;
1955         }
1956         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1957         dc_release_state(dm_state->context);
1958         dm_state->context = dc_create_state(dm->dc);
1959         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1960         dc_resource_state_construct(dm->dc, dm_state->context);
1961
1962         /* Before powering on DC we need to re-initialize DMUB. */
1963         r = dm_dmub_hw_init(adev);
1964         if (r)
1965                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1966
1967         /* power on hardware */
1968         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1969
1970         /* program HPD filter */
1971         dc_resume(dm->dc);
1972
1973         /*
1974          * Enable the HPD Rx IRQ early; this should be done before modeset,
1975          * since short-pulse interrupts are used for MST.
1976          */
1977         amdgpu_dm_irq_resume_early(adev);
1978
1979         /* On resume we need to rewrite the MSTM control bits to enable MST */
1980         s3_handle_mst(ddev, false);
1981
1982         /* Do detection */
1983         drm_connector_list_iter_begin(ddev, &iter);
1984         drm_for_each_connector_iter(connector, &iter) {
1985                 aconnector = to_amdgpu_dm_connector(connector);
1986
1987                 /*
1988                  * This is the case when traversing through already created
1989                  * MST connectors; they should be skipped.
1990                  */
1991                 if (aconnector->mst_port)
1992                         continue;
1993
1994                 mutex_lock(&aconnector->hpd_lock);
1995                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1996                         DRM_ERROR("KMS: Failed to detect connector\n");
1997
1998                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1999                         emulated_link_detect(aconnector->dc_link);
2000                 else
2001                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2002
2003                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2004                         aconnector->fake_enable = false;
2005
2006                 if (aconnector->dc_sink)
2007                         dc_sink_release(aconnector->dc_sink);
2008                 aconnector->dc_sink = NULL;
2009                 amdgpu_dm_update_connector_after_detect(aconnector);
2010                 mutex_unlock(&aconnector->hpd_lock);
2011         }
2012         drm_connector_list_iter_end(&iter);
2013
2014         /* Force mode set in atomic commit */
2015         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2016                 new_crtc_state->active_changed = true;
2017
2018         /*
2019          * atomic_check is expected to create the dc states. We need to release
2020          * them here, since they were duplicated as part of the suspend
2021          * procedure.
2022          */
2023         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2024                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2025                 if (dm_new_crtc_state->stream) {
2026                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2027                         dc_stream_release(dm_new_crtc_state->stream);
2028                         dm_new_crtc_state->stream = NULL;
2029                 }
2030         }
2031
2032         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2033                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2034                 if (dm_new_plane_state->dc_state) {
2035                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2036                         dc_plane_state_release(dm_new_plane_state->dc_state);
2037                         dm_new_plane_state->dc_state = NULL;
2038                 }
2039         }
2040
2041         drm_atomic_helper_resume(ddev, dm->cached_state);
2042
2043         dm->cached_state = NULL;
2044
2045         amdgpu_dm_irq_resume_late(adev);
2046
2047         amdgpu_dm_smu_write_watermarks_table(adev);
2048
2049         return 0;
2050 }
2051
2052 /**
2053  * DOC: DM Lifecycle
2054  *
2055  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2056  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2057  * the base driver's device list to be initialized and torn down accordingly.
2058  *
2059  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2060  */
2061
2062 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2063         .name = "dm",
2064         .early_init = dm_early_init,
2065         .late_init = dm_late_init,
2066         .sw_init = dm_sw_init,
2067         .sw_fini = dm_sw_fini,
2068         .hw_init = dm_hw_init,
2069         .hw_fini = dm_hw_fini,
2070         .suspend = dm_suspend,
2071         .resume = dm_resume,
2072         .is_idle = dm_is_idle,
2073         .wait_for_idle = dm_wait_for_idle,
2074         .check_soft_reset = dm_check_soft_reset,
2075         .soft_reset = dm_soft_reset,
2076         .set_clockgating_state = dm_set_clockgating_state,
2077         .set_powergating_state = dm_set_powergating_state,
2078 };
2079
2080 const struct amdgpu_ip_block_version dm_ip_block =
2081 {
2082         .type = AMD_IP_BLOCK_TYPE_DCE,
2083         .major = 1,
2084         .minor = 0,
2085         .rev = 0,
2086         .funcs = &amdgpu_dm_funcs,
2087 };
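
/*
 * Registration sketch (editor's illustration; the exact call site lives in
 * the SoC-specific setup code, e.g. soc15.c, not in this file). The base
 * driver registers the block roughly as:
 *
 *      amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the amd_ip_funcs hooks above run through the normal IP-block
 * init/fini/suspend/resume sequencing.
 */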
2088
2089
2090 /**
2091  * DOC: atomic
2092  *
2093  * *WIP*
2094  */
2095
2096 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2097         .fb_create = amdgpu_display_user_framebuffer_create,
2098         .output_poll_changed = drm_fb_helper_output_poll_changed,
2099         .atomic_check = amdgpu_dm_atomic_check,
2100         .atomic_commit = amdgpu_dm_atomic_commit,
2101 };
2102
2103 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2104         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2105 };
2106
2107 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2108 {
2109         u32 max_cll, min_cll, max, min, q, r;
2110         struct amdgpu_dm_backlight_caps *caps;
2111         struct amdgpu_display_manager *dm;
2112         struct drm_connector *conn_base;
2113         struct amdgpu_device *adev;
2114         struct dc_link *link = NULL;
2115         static const u8 pre_computed_values[] = {
2116                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2117                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2118
2119         if (!aconnector || !aconnector->dc_link)
2120                 return;
2121
2122         link = aconnector->dc_link;
2123         if (link->connector_signal != SIGNAL_TYPE_EDP)
2124                 return;
2125
2126         conn_base = &aconnector->base;
2127         adev = drm_to_adev(conn_base->dev);
2128         dm = &adev->dm;
2129         caps = &dm->backlight_caps;
2130         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2131         caps->aux_support = false;
2132         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2133         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2134
2135         if (caps->ext_caps->bits.oled == 1 ||
2136             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2137             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2138                 caps->aux_support = true;
2139
2140         /* From the specification (CTA-861-G), the maximum luminance is
2141          * calculated as:
2142          *      Luminance = 50*2**(CV/32)
2143          * where CV is a one-byte value.
2144          * Evaluating this expression directly would need floating-point
2145          * precision; to avoid that complexity, we take advantage of the
2146          * fact that CV is divided by a constant. By Euclidean division, CV
2147          * can be written as CV = 32*q + r. Substituting this into the
2148          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2149          * to pre-compute the values of 2**(r/32). These were generated with
2150          * the following Ruby line:
2151          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2152          * The results of the above expression can be verified against
2153          * pre_computed_values.
2154          */
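        /*
         * Worked example (editor's illustration, assumed value): a sink
         * reporting max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6,
         * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, close
         * to the exact 50*2**(70/32) ~= 227.7.
         */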
2155         q = max_cll >> 5;
2156         r = max_cll % 32;
2157         max = (1 << q) * pre_computed_values[r];
2158
2159         /* min luminance: maxLum * (CV/255)^2 / 100 */
2160         q = DIV_ROUND_CLOSEST(min_cll, 255);
2161         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2162
2163         caps->aux_max_input_signal = max;
2164         caps->aux_min_input_signal = min;
2165 }
2166
2167 void amdgpu_dm_update_connector_after_detect(
2168                 struct amdgpu_dm_connector *aconnector)
2169 {
2170         struct drm_connector *connector = &aconnector->base;
2171         struct drm_device *dev = connector->dev;
2172         struct dc_sink *sink;
2173
2174         /* MST is handled by the drm_dp_mst framework */
2175         if (aconnector->mst_mgr.mst_state)
2176                 return;
2177
2178         sink = aconnector->dc_link->local_sink;
2179         if (sink)
2180                 dc_sink_retain(sink);
2181
2182         /*
2183          * An EDID-managed connector gets its first update only in the
2184          * mode_valid hook; the sink is then set to fake or physical,
2185          * depending on link status. Skip if already done during boot.
2186          */
2187         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2188                         && aconnector->dc_em_sink) {
2189
2190                 /*
2191                  * For headless S3 resume, use the emulated sink to fake a
2192                  * stream, because connector->sink is set to NULL on resume.
2193                  */
2194                 mutex_lock(&dev->mode_config.mutex);
2195
2196                 if (sink) {
2197                         if (aconnector->dc_sink) {
2198                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2199                                 /*
2200                                  * The retain/release below bump up the sink
2201                                  * refcount: the link no longer points to it after
2202                                  * disconnect, so the next crtc-to-connector
2203                                  * reshuffle by UMD would otherwise release dc_sink.
2204                                  */
2205                                 dc_sink_release(aconnector->dc_sink);
2206                         }
2207                         aconnector->dc_sink = sink;
2208                         dc_sink_retain(aconnector->dc_sink);
2209                         amdgpu_dm_update_freesync_caps(connector,
2210                                         aconnector->edid);
2211                 } else {
2212                         amdgpu_dm_update_freesync_caps(connector, NULL);
2213                         if (!aconnector->dc_sink) {
2214                                 aconnector->dc_sink = aconnector->dc_em_sink;
2215                                 dc_sink_retain(aconnector->dc_sink);
2216                         }
2217                 }
2218
2219                 mutex_unlock(&dev->mode_config.mutex);
2220
2221                 if (sink)
2222                         dc_sink_release(sink);
2223                 return;
2224         }
2225
2226         /*
2227          * TODO: temporary guard until a proper fix is found.
2228          * If this sink is an MST sink, we should not do anything.
2229          */
2230         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2231                 dc_sink_release(sink);
2232                 return;
2233         }
2234
2235         if (aconnector->dc_sink == sink) {
2236                 /*
2237                  * We got a DP short pulse (link loss, DP CTS, etc.).
2238                  * Do nothing.
2239                  */
2240                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2241                                 aconnector->connector_id);
2242                 if (sink)
2243                         dc_sink_release(sink);
2244                 return;
2245         }
2246
2247         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2248                 aconnector->connector_id, aconnector->dc_sink, sink);
2249
2250         mutex_lock(&dev->mode_config.mutex);
2251
2252         /*
2253          * 1. Update status of the drm connector
2254          * 2. Send an event and let userspace tell us what to do
2255          */
2256         if (sink) {
2257                 /*
2258                  * TODO: check if we still need the S3 mode update workaround.
2259                  * If yes, put it here.
2260                  */
2261                 if (aconnector->dc_sink)
2262                         amdgpu_dm_update_freesync_caps(connector, NULL);
2263
2264                 aconnector->dc_sink = sink;
2265                 dc_sink_retain(aconnector->dc_sink);
2266                 if (sink->dc_edid.length == 0) {
2267                         aconnector->edid = NULL;
2268                         if (aconnector->dc_link->aux_mode) {
2269                                 drm_dp_cec_unset_edid(
2270                                         &aconnector->dm_dp_aux.aux);
2271                         }
2272                 } else {
2273                         aconnector->edid =
2274                                 (struct edid *)sink->dc_edid.raw_edid;
2275
2276                         drm_connector_update_edid_property(connector,
2277                                                            aconnector->edid);
2278                         drm_add_edid_modes(connector, aconnector->edid);
2279
2280                         if (aconnector->dc_link->aux_mode)
2281                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2282                                                     aconnector->edid);
2283                 }
2284
2285                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2286                 update_connector_ext_caps(aconnector);
2287         } else {
2288                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2289                 amdgpu_dm_update_freesync_caps(connector, NULL);
2290                 drm_connector_update_edid_property(connector, NULL);
2291                 aconnector->num_modes = 0;
2292                 dc_sink_release(aconnector->dc_sink);
2293                 aconnector->dc_sink = NULL;
2294                 aconnector->edid = NULL;
2295 #ifdef CONFIG_DRM_AMD_DC_HDCP
2296                 /* Set CP to DESIRED if it was ENABLED, so it can be re-enabled on hotplug */
2297                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2298                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2299 #endif
2300         }
2301
2302         mutex_unlock(&dev->mode_config.mutex);
2303
2304         update_subconnector_property(aconnector);
2305
2306         if (sink)
2307                 dc_sink_release(sink);
2308 }
2309
2310 static void handle_hpd_irq(void *param)
2311 {
2312         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2313         struct drm_connector *connector = &aconnector->base;
2314         struct drm_device *dev = connector->dev;
2315         enum dc_connection_type new_connection_type = dc_connection_none;
2316 #ifdef CONFIG_DRM_AMD_DC_HDCP
2317         struct amdgpu_device *adev = drm_to_adev(dev);
2318 #endif
2319
2320         /*
2321          * On failure, or for MST, there is no need to update the connector
2322          * status or notify the OS, since MST handles this in its own context.
2323          */
2324         mutex_lock(&aconnector->hpd_lock);
2325
2326 #ifdef CONFIG_DRM_AMD_DC_HDCP
2327         if (adev->dm.hdcp_workqueue)
2328                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2329 #endif
2330         if (aconnector->fake_enable)
2331                 aconnector->fake_enable = false;
2332
2333         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2334                 DRM_ERROR("KMS: Failed to detect connector\n");
2335
2336         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2337                 emulated_link_detect(aconnector->dc_link);
2338
2339
2340                 drm_modeset_lock_all(dev);
2341                 dm_restore_drm_connector_state(dev, connector);
2342                 drm_modeset_unlock_all(dev);
2343
2344                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2345                         drm_kms_helper_hotplug_event(dev);
2346
2347         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2348                 amdgpu_dm_update_connector_after_detect(aconnector);
2349
2350
2351                 drm_modeset_lock_all(dev);
2352                 dm_restore_drm_connector_state(dev, connector);
2353                 drm_modeset_unlock_all(dev);
2354
2355                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2356                         drm_kms_helper_hotplug_event(dev);
2357         }
2358         mutex_unlock(&aconnector->hpd_lock);
2359
2360 }
2361
2362 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2363 {
2364         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2365         uint8_t dret;
2366         bool new_irq_handled = false;
2367         int dpcd_addr;
2368         int dpcd_bytes_to_read;
2369
2370         const int max_process_count = 30;
2371         int process_count = 0;
2372
2373         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2374
2375         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2376                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2377                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2378                 dpcd_addr = DP_SINK_COUNT;
2379         } else {
2380                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2381                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2382                 dpcd_addr = DP_SINK_COUNT_ESI;
2383         }
2384
2385         dret = drm_dp_dpcd_read(
2386                 &aconnector->dm_dp_aux.aux,
2387                 dpcd_addr,
2388                 esi,
2389                 dpcd_bytes_to_read);
2390
2391         while (dret == dpcd_bytes_to_read &&
2392                 process_count < max_process_count) {
2393                 uint8_t retry;
2394                 dret = 0;
2395
2396                 process_count++;
2397
2398                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2399                 /* handle HPD short pulse irq */
2400                 if (aconnector->mst_mgr.mst_state)
2401                         drm_dp_mst_hpd_irq(
2402                                 &aconnector->mst_mgr,
2403                                 esi,
2404                                 &new_irq_handled);
2405
2406                 if (new_irq_handled) {
2407                         /* ACK at DPCD to notify downstream */
2408                         const int ack_dpcd_bytes_to_write =
2409                                 dpcd_bytes_to_read - 1;
2410
2411                         for (retry = 0; retry < 3; retry++) {
2412                                 uint8_t wret;
2413
2414                                 wret = drm_dp_dpcd_write(
2415                                         &aconnector->dm_dp_aux.aux,
2416                                         dpcd_addr + 1,
2417                                         &esi[1],
2418                                         ack_dpcd_bytes_to_write);
2419                                 if (wret == ack_dpcd_bytes_to_write)
2420                                         break;
2421                         }
2422
2423                         /* check if there is a new IRQ to be handled */
2424                         dret = drm_dp_dpcd_read(
2425                                 &aconnector->dm_dp_aux.aux,
2426                                 dpcd_addr,
2427                                 esi,
2428                                 dpcd_bytes_to_read);
2429
2430                         new_irq_handled = false;
2431                 } else {
2432                         break;
2433                 }
2434         }
2435
2436         if (process_count == max_process_count)
2437                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2438 }
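
/*
 * Protocol note (editor's addition): the loop above implements the DP MST
 * short-pulse service sequence: read the ESI bytes, let drm_dp_mst_hpd_irq()
 * process them, acknowledge by writing the handled ESI bytes back (retrying
 * up to 3 times), then re-read in case a new IRQ was raised, giving up after
 * max_process_count iterations.
 */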
2439
2440 static void handle_hpd_rx_irq(void *param)
2441 {
2442         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443         struct drm_connector *connector = &aconnector->base;
2444         struct drm_device *dev = connector->dev;
2445         struct dc_link *dc_link = aconnector->dc_link;
2446         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2447         enum dc_connection_type new_connection_type = dc_connection_none;
2448 #ifdef CONFIG_DRM_AMD_DC_HDCP
2449         union hpd_irq_data hpd_irq_data;
2450         struct amdgpu_device *adev = drm_to_adev(dev);
2451
2452         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2453 #endif
2454
2455         /*
2456          * TODO: Temporarily take a mutex to protect the HPD interrupt from
2457          * GPIO conflicts; once an i2c helper is implemented, this mutex
2458          * should be retired.
2459          */
2460         if (dc_link->type != dc_connection_mst_branch)
2461                 mutex_lock(&aconnector->hpd_lock);
2462
2463
2464 #ifdef CONFIG_DRM_AMD_DC_HDCP
2465         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2466 #else
2467         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2468 #endif
2469                         !is_mst_root_connector) {
2470                 /* Downstream Port status changed. */
2471                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2472                         DRM_ERROR("KMS: Failed to detect connector\n");
2473
2474                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475                         emulated_link_detect(dc_link);
2476
2477                         if (aconnector->fake_enable)
2478                                 aconnector->fake_enable = false;
2479
2480                         amdgpu_dm_update_connector_after_detect(aconnector);
2481
2482
2483                         drm_modeset_lock_all(dev);
2484                         dm_restore_drm_connector_state(dev, connector);
2485                         drm_modeset_unlock_all(dev);
2486
2487                         drm_kms_helper_hotplug_event(dev);
2488                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2489
2490                         if (aconnector->fake_enable)
2491                                 aconnector->fake_enable = false;
2492
2493                         amdgpu_dm_update_connector_after_detect(aconnector);
2494
2495
2496                         drm_modeset_lock_all(dev);
2497                         dm_restore_drm_connector_state(dev, connector);
2498                         drm_modeset_unlock_all(dev);
2499
2500                         drm_kms_helper_hotplug_event(dev);
2501                 }
2502         }
2503 #ifdef CONFIG_DRM_AMD_DC_HDCP
2504         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2505                 if (adev->dm.hdcp_workqueue)
2506                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2507         }
2508 #endif
2509         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2510             (dc_link->type == dc_connection_mst_branch))
2511                 dm_handle_hpd_rx_irq(aconnector);
2512
2513         if (dc_link->type != dc_connection_mst_branch) {
2514                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2515                 mutex_unlock(&aconnector->hpd_lock);
2516         }
2517 }
2518
2519 static void register_hpd_handlers(struct amdgpu_device *adev)
2520 {
2521         struct drm_device *dev = adev_to_drm(adev);
2522         struct drm_connector *connector;
2523         struct amdgpu_dm_connector *aconnector;
2524         const struct dc_link *dc_link;
2525         struct dc_interrupt_params int_params = {0};
2526
2527         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2528         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2529
2530         list_for_each_entry(connector,
2531                         &dev->mode_config.connector_list, head) {
2532
2533                 aconnector = to_amdgpu_dm_connector(connector);
2534                 dc_link = aconnector->dc_link;
2535
2536                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2537                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2538                         int_params.irq_source = dc_link->irq_source_hpd;
2539
2540                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2541                                         handle_hpd_irq,
2542                                         (void *) aconnector);
2543                 }
2544
2545                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2546
2547                         /* Also register for DP short pulse (hpd_rx). */
2548                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2549                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2550
2551                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2552                                         handle_hpd_rx_irq,
2553                                         (void *) aconnector);
2554                 }
2555         }
2556 }
2557
2558 #if defined(CONFIG_DRM_AMD_DC_SI)
2559 /* Register IRQ sources and initialize IRQ callbacks */
2560 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2561 {
2562         struct dc *dc = adev->dm.dc;
2563         struct common_irq_params *c_irq_params;
2564         struct dc_interrupt_params int_params = {0};
2565         int r;
2566         int i;
2567         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2568
2569         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2570         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2571
2572         /*
2573          * Actions of amdgpu_irq_add_id():
2574          * 1. Register a set() function with base driver.
2575          *    Base driver will call set() function to enable/disable an
2576          *    interrupt in DC hardware.
2577          * 2. Register amdgpu_dm_irq_handler().
2578          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2579          *    coming from DC hardware.
2580          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2581          *    for acknowledging and handling. */
2582
2583         /* Use VBLANK interrupt */
2584         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2585                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2586                 if (r) {
2587                         DRM_ERROR("Failed to add crtc irq id!\n");
2588                         return r;
2589                 }
2590
2591                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592                 int_params.irq_source =
2593                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2594
2595                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2596
2597                 c_irq_params->adev = adev;
2598                 c_irq_params->irq_src = int_params.irq_source;
2599
2600                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2601                                 dm_crtc_high_irq, c_irq_params);
2602         }
2603
2604         /* Use GRPH_PFLIP interrupt */
2605         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2606                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2607                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2608                 if (r) {
2609                         DRM_ERROR("Failed to add page flip irq id!\n");
2610                         return r;
2611                 }
2612
2613                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2614                 int_params.irq_source =
2615                         dc_interrupt_to_irq_source(dc, i, 0);
2616
2617                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2618
2619                 c_irq_params->adev = adev;
2620                 c_irq_params->irq_src = int_params.irq_source;
2621
2622                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2623                                 dm_pflip_high_irq, c_irq_params);
2624
2625         }
2626
2627         /* HPD */
2628         r = amdgpu_irq_add_id(adev, client_id,
2629                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2630         if (r) {
2631                 DRM_ERROR("Failed to add hpd irq id!\n");
2632                 return r;
2633         }
2634
2635         register_hpd_handlers(adev);
2636
2637         return 0;
2638 }
2639 #endif
2640
2641 /* Register IRQ sources and initialize IRQ callbacks */
2642 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2643 {
2644         struct dc *dc = adev->dm.dc;
2645         struct common_irq_params *c_irq_params;
2646         struct dc_interrupt_params int_params = {0};
2647         int r;
2648         int i;
2649         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2650
2651         if (adev->asic_type >= CHIP_VEGA10)
2652                 client_id = SOC15_IH_CLIENTID_DCE;
2653
2654         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2655         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2656
2657         /*
2658          * Actions of amdgpu_irq_add_id():
2659          * 1. Register a set() function with base driver.
2660          *    Base driver will call set() function to enable/disable an
2661          *    interrupt in DC hardware.
2662          * 2. Register amdgpu_dm_irq_handler().
2663          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2664          *    coming from DC hardware.
2665          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2666          *    for acknowledging and handling. */
2667
2668         /* Use VBLANK interrupt */
2669         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2670                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2671                 if (r) {
2672                         DRM_ERROR("Failed to add crtc irq id!\n");
2673                         return r;
2674                 }
2675
2676                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2677                 int_params.irq_source =
2678                         dc_interrupt_to_irq_source(dc, i, 0);
2679
2680                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2681
2682                 c_irq_params->adev = adev;
2683                 c_irq_params->irq_src = int_params.irq_source;
2684
2685                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2686                                 dm_crtc_high_irq, c_irq_params);
2687         }
2688
2689         /* Use VUPDATE interrupt */
2690         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2691                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2692                 if (r) {
2693                         DRM_ERROR("Failed to add vupdate irq id!\n");
2694                         return r;
2695                 }
2696
2697                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2698                 int_params.irq_source =
2699                         dc_interrupt_to_irq_source(dc, i, 0);
2700
2701                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2702
2703                 c_irq_params->adev = adev;
2704                 c_irq_params->irq_src = int_params.irq_source;
2705
2706                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707                                 dm_vupdate_high_irq, c_irq_params);
2708         }
2709
2710         /* Use GRPH_PFLIP interrupt */
2711         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2712                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2713                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2714                 if (r) {
2715                         DRM_ERROR("Failed to add page flip irq id!\n");
2716                         return r;
2717                 }
2718
2719                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2720                 int_params.irq_source =
2721                         dc_interrupt_to_irq_source(dc, i, 0);
2722
2723                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2724
2725                 c_irq_params->adev = adev;
2726                 c_irq_params->irq_src = int_params.irq_source;
2727
2728                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2729                                 dm_pflip_high_irq, c_irq_params);
2730
2731         }
2732
2733         /* HPD */
2734         r = amdgpu_irq_add_id(adev, client_id,
2735                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2736         if (r) {
2737                 DRM_ERROR("Failed to add hpd irq id!\n");
2738                 return r;
2739         }
2740
2741         register_hpd_handlers(adev);
2742
2743         return 0;
2744 }
2745
2746 #if defined(CONFIG_DRM_AMD_DC_DCN)
2747 /* Register IRQ sources and initialize IRQ callbacks */
2748 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2749 {
2750         struct dc *dc = adev->dm.dc;
2751         struct common_irq_params *c_irq_params;
2752         struct dc_interrupt_params int_params = {0};
2753         int r;
2754         int i;
2755
2756         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2757         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2758
2759         /*
2760          * Actions of amdgpu_irq_add_id():
2761          * 1. Register a set() function with base driver.
2762          *    Base driver will call set() function to enable/disable an
2763          *    interrupt in DC hardware.
2764          * 2. Register amdgpu_dm_irq_handler().
2765          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2766          *    coming from DC hardware.
2767          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2768          *    for acknowledging and handling.
2769          */
2770
2771         /* Use VSTARTUP interrupt */
2772         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2773                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2774                         i++) {
2775                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2776
2777                 if (r) {
2778                         DRM_ERROR("Failed to add crtc irq id!\n");
2779                         return r;
2780                 }
2781
2782                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2783                 int_params.irq_source =
2784                         dc_interrupt_to_irq_source(dc, i, 0);
2785
2786                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2787
2788                 c_irq_params->adev = adev;
2789                 c_irq_params->irq_src = int_params.irq_source;
2790
2791                 amdgpu_dm_irq_register_interrupt(
2792                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2793         }
2794
2795         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2796          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2797          * to trigger at the end of each vblank, regardless of the state of
2798          * the lock, matching DCE behaviour.
2799          */
2800         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2801              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2802              i++) {
2803                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2804
2805                 if (r) {
2806                         DRM_ERROR("Failed to add vupdate irq id!\n");
2807                         return r;
2808                 }
2809
2810                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811                 int_params.irq_source =
2812                         dc_interrupt_to_irq_source(dc, i, 0);
2813
2814                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2815
2816                 c_irq_params->adev = adev;
2817                 c_irq_params->irq_src = int_params.irq_source;
2818
2819                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820                                 dm_vupdate_high_irq, c_irq_params);
2821         }
2822
2823         /* Use GRPH_PFLIP interrupt */
2824         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2825                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2826                         i++) {
2827                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2828                 if (r) {
2829                         DRM_ERROR("Failed to add page flip irq id!\n");
2830                         return r;
2831                 }
2832
2833                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2834                 int_params.irq_source =
2835                         dc_interrupt_to_irq_source(dc, i, 0);
2836
2837                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2838
2839                 c_irq_params->adev = adev;
2840                 c_irq_params->irq_src = int_params.irq_source;
2841
2842                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2843                                 dm_pflip_high_irq, c_irq_params);
2844
2845         }
2846
2847         /* HPD */
2848         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2849                         &adev->hpd_irq);
2850         if (r) {
2851                 DRM_ERROR("Failed to add hpd irq id!\n");
2852                 return r;
2853         }
2854
2855         register_hpd_handlers(adev);
2856
2857         return 0;
2858 }
2859 #endif
2860
2861 /*
2862  * Acquires the lock for the atomic state object and returns
2863  * the new atomic state.
2864  *
2865  * This should only be called during atomic check.
2866  */
2867 static int dm_atomic_get_state(struct drm_atomic_state *state,
2868                                struct dm_atomic_state **dm_state)
2869 {
2870         struct drm_device *dev = state->dev;
2871         struct amdgpu_device *adev = drm_to_adev(dev);
2872         struct amdgpu_display_manager *dm = &adev->dm;
2873         struct drm_private_state *priv_state;
2874
2875         if (*dm_state)
2876                 return 0;
2877
2878         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2879         if (IS_ERR(priv_state))
2880                 return PTR_ERR(priv_state);
2881
2882         *dm_state = to_dm_atomic_state(priv_state);
2883
2884         return 0;
2885 }
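
/*
 * Minimal usage sketch for the helper above (hypothetical caller, for
 * illustration only):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *
 *      if (!ret)
 *              (... use dm_state->context ...);
 *
 * Passing the same &dm_state again is a no-op: the early "if (*dm_state)"
 * return lets repeated lookups during one atomic check reuse the private
 * state acquired on the first call.
 */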
2886
2887 static struct dm_atomic_state *
2888 dm_atomic_get_new_state(struct drm_atomic_state *state)
2889 {
2890         struct drm_device *dev = state->dev;
2891         struct amdgpu_device *adev = drm_to_adev(dev);
2892         struct amdgpu_display_manager *dm = &adev->dm;
2893         struct drm_private_obj *obj;
2894         struct drm_private_state *new_obj_state;
2895         int i;
2896
2897         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2898                 if (obj->funcs == dm->atomic_obj.funcs)
2899                         return to_dm_atomic_state(new_obj_state);
2900         }
2901
2902         return NULL;
2903 }
2904
2905 static struct drm_private_state *
2906 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2907 {
2908         struct dm_atomic_state *old_state, *new_state;
2909
2910         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2911         if (!new_state)
2912                 return NULL;
2913
2914         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2915
2916         old_state = to_dm_atomic_state(obj->state);
2917
2918         if (old_state && old_state->context)
2919                 new_state->context = dc_copy_state(old_state->context);
2920
2921         if (!new_state->context) {
2922                 kfree(new_state);
2923                 return NULL;
2924         }
2925
2926         return &new_state->base;
2927 }
2928
2929 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2930                                     struct drm_private_state *state)
2931 {
2932         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2933
2934         if (dm_state && dm_state->context)
2935                 dc_release_state(dm_state->context);
2936
2937         kfree(dm_state);
2938 }
2939
2940 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2941         .atomic_duplicate_state = dm_atomic_duplicate_state,
2942         .atomic_destroy_state = dm_atomic_destroy_state,
2943 };
2944
2945 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2946 {
2947         struct dm_atomic_state *state;
2948         int r;
2949
2950         adev->mode_info.mode_config_initialized = true;
2951
2952         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2953         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2954
2955         adev_to_drm(adev)->mode_config.max_width = 16384;
2956         adev_to_drm(adev)->mode_config.max_height = 16384;
2957
2958         adev_to_drm(adev)->mode_config.preferred_depth = 24;
2959         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2960         /* indicates support for immediate flip */
2961         adev_to_drm(adev)->mode_config.async_page_flip = true;
2962
2963         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2964
2965         state = kzalloc(sizeof(*state), GFP_KERNEL);
2966         if (!state)
2967                 return -ENOMEM;
2968
2969         state->context = dc_create_state(adev->dm.dc);
2970         if (!state->context) {
2971                 kfree(state);
2972                 return -ENOMEM;
2973         }
2974
2975         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2976
2977         drm_atomic_private_obj_init(adev_to_drm(adev),
2978                                     &adev->dm.atomic_obj,
2979                                     &state->base,
2980                                     &dm_atomic_state_funcs);
2981
2982         r = amdgpu_display_modeset_create_props(adev);
2983         if (r) {
2984                 dc_release_state(state->context);
2985                 kfree(state);
2986                 return r;
2987         }
2988
2989         r = amdgpu_dm_audio_init(adev);
2990         if (r) {
2991                 dc_release_state(state->context);
2992                 kfree(state);
2993                 return r;
2994         }
2995
2996         return 0;
2997 }
2998
2999 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3000 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3001 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3002
3003 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3004         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3005
3006 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3007 {
3008 #if defined(CONFIG_ACPI)
3009         struct amdgpu_dm_backlight_caps caps;
3010
3011         memset(&caps, 0, sizeof(caps));
3012
3013         if (dm->backlight_caps.caps_valid)
3014                 return;
3015
3016         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3017         if (caps.caps_valid) {
3018                 dm->backlight_caps.caps_valid = true;
3019                 if (caps.aux_support)
3020                         return;
3021                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3022                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3023         } else {
3024                 dm->backlight_caps.min_input_signal =
3025                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3026                 dm->backlight_caps.max_input_signal =
3027                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3028         }
3029 #else
3030         if (dm->backlight_caps.aux_support)
3031                 return;
3032
3033         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3034         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3035 #endif
3036 }
3037
3038 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3039 {
3040         bool rc;
3041
3042         if (!link)
3043                 return 1;
3044
3045         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3046                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3047
3048         return rc ? 0 : 1;
3049 }
3050
3051 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3052                                 unsigned *min, unsigned *max)
3053 {
3054         if (!caps)
3055                 return 0;
3056
3057         if (caps->aux_support) {
3058                 // Firmware limits are in nits, DC API wants millinits.
3059                 *max = 1000 * caps->aux_max_input_signal;
3060                 *min = 1000 * caps->aux_min_input_signal;
3061         } else {
3062                 // Firmware limits are 8-bit, PWM control is 16-bit.
3063                 *max = 0x101 * caps->max_input_signal;
3064                 *min = 0x101 * caps->min_input_signal;
3065         }
3066         return 1;
3067 }
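
/*
 * Worked example (illustrative, assuming the defaults defined above): with
 * no ACPI caps, min_input_signal = 12 and max_input_signal = 255, so the
 * PWM range becomes
 *
 *      min = 0x101 * 12  =  3084 (0x0C0C)
 *      max = 0x101 * 255 = 65535 (0xFFFF)
 *
 * i.e. multiplying by 0x101 replicates the 8-bit firmware byte into both
 * bytes of the 16-bit PWM value (0xAB -> 0xABAB).
 */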
3068
3069 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3070                                         uint32_t brightness)
3071 {
3072         unsigned min, max;
3073
3074         if (!get_brightness_range(caps, &min, &max))
3075                 return brightness;
3076
3077         // Rescale 0..255 to min..max
3078         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3079                                        AMDGPU_MAX_BL_LEVEL);
3080 }
3081
3082 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3083                                       uint32_t brightness)
3084 {
3085         unsigned min, max;
3086
3087         if (!get_brightness_range(caps, &min, &max))
3088                 return brightness;
3089
3090         if (brightness < min)
3091                 return 0;
3092         // Rescale min..max to 0..255
3093         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3094                                  max - min);
3095 }
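
/*
 * Worked example for the pair of converters above (illustrative, assuming
 * AMDGPU_MAX_BL_LEVEL == 255 and the PWM range from the previous example,
 * min = 3084, max = 65535):
 *
 *      convert_brightness_from_user(caps, 0)   -> 3084  (min)
 *      convert_brightness_from_user(caps, 255) -> 65535 (max)
 *      convert_brightness_to_user(caps, 65535) -> 255
 *
 * so a sysfs brightness of 0..255 maps linearly onto the hardware range and
 * back, with values below min clamped to 0 on the way out.
 */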
3096
3097 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3098 {
3099         struct amdgpu_display_manager *dm = bl_get_data(bd);
3100         struct amdgpu_dm_backlight_caps caps;
3101         struct dc_link *link = NULL;
3102         u32 brightness;
3103         bool rc;
3104
3105         amdgpu_dm_update_backlight_caps(dm);
3106         caps = dm->backlight_caps;
3107
3108         link = (struct dc_link *)dm->backlight_link;
3109
3110         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3111         // Change brightness based on AUX property
3112         if (caps.aux_support)
3113                 return set_backlight_via_aux(link, brightness);
3114
3115         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3116
3117         return rc ? 0 : 1;
3118 }
3119
3120 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3121 {
3122         struct amdgpu_display_manager *dm = bl_get_data(bd);
3123         int ret = dc_link_get_backlight_level(dm->backlight_link);
3124
3125         if (ret == DC_ERROR_UNEXPECTED)
3126                 return bd->props.brightness;
3127         return convert_brightness_to_user(&dm->backlight_caps, ret);
3128 }
3129
3130 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3131         .options = BL_CORE_SUSPENDRESUME,
3132         .get_brightness = amdgpu_dm_backlight_get_brightness,
3133         .update_status  = amdgpu_dm_backlight_update_status,
3134 };
3135
3136 static void
3137 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3138 {
3139         char bl_name[16];
3140         struct backlight_properties props = { 0 };
3141
3142         amdgpu_dm_update_backlight_caps(dm);
3143
3144         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3145         props.brightness = AMDGPU_MAX_BL_LEVEL;
3146         props.type = BACKLIGHT_RAW;
3147
3148         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3149                  adev_to_drm(dm->adev)->primary->index);
3150
3151         dm->backlight_dev = backlight_device_register(bl_name,
3152                                                       adev_to_drm(dm->adev)->dev,
3153                                                       dm,
3154                                                       &amdgpu_dm_backlight_ops,
3155                                                       &props);
3156
3157         if (IS_ERR(dm->backlight_dev))
3158                 DRM_ERROR("DM: Backlight registration failed!\n");
3159         else
3160                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3161 }
3162
3163 #endif
3164
3165 static int initialize_plane(struct amdgpu_display_manager *dm,
3166                             struct amdgpu_mode_info *mode_info, int plane_id,
3167                             enum drm_plane_type plane_type,
3168                             const struct dc_plane_cap *plane_cap)
3169 {
3170         struct drm_plane *plane;
3171         unsigned long possible_crtcs;
3172         int ret = 0;
3173
3174         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3175         if (!plane) {
3176                 DRM_ERROR("KMS: Failed to allocate plane\n");
3177                 return -ENOMEM;
3178         }
3179         plane->type = plane_type;
3180
3181         /*
3182          * HACK: IGT tests expect that the primary plane for a CRTC
3183          * can only have one possible CRTC. Only expose support for
3184          * any CRTC for planes that will not be used as a primary
3185          * plane for a CRTC, such as overlay or underlay planes.
3186          */
3187         possible_crtcs = 1 << plane_id;
3188         if (plane_id >= dm->dc->caps.max_streams)
3189                 possible_crtcs = 0xff;
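
        /*
         * Example (illustrative): plane_id 0 yields possible_crtcs = 0x1,
         * binding that primary plane to CRTC 0 only, while an overlay
         * plane (plane_id >= max_streams) gets 0xff and may be assigned
         * to any of up to 8 CRTCs.
         */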
3190
3191         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3192
3193         if (ret) {
3194                 DRM_ERROR("KMS: Failed to initialize plane\n");
3195                 kfree(plane);
3196                 return ret;
3197         }
3198
3199         if (mode_info)
3200                 mode_info->planes[plane_id] = plane;
3201
3202         return ret;
3203 }
3204
3205
3206 static void register_backlight_device(struct amdgpu_display_manager *dm,
3207                                       struct dc_link *link)
3208 {
3209 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3210         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3211
3212         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3213             link->type != dc_connection_none) {
3214                 /*
3215                  * Even if registration fails, we should continue with
3216                  * DM initialization, because not having a backlight control
3217                  * is better than a black screen.
3218                  */
3219                 amdgpu_dm_register_backlight_device(dm);
3220
3221                 if (dm->backlight_dev)
3222                         dm->backlight_link = link;
3223         }
3224 #endif
3225 }
3226
3227
3228 /*
3229  * In this architecture, the association
3230  * connector -> encoder -> crtc
3231  * is not really required. The crtc and connector will hold the
3232  * display_index as an abstraction to use with the DAL component.
3233  *
3234  * Returns 0 on success
3235  */
3236 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3237 {
3238         struct amdgpu_display_manager *dm = &adev->dm;
3239         int32_t i;
3240         struct amdgpu_dm_connector *aconnector = NULL;
3241         struct amdgpu_encoder *aencoder = NULL;
3242         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3243         uint32_t link_cnt;
3244         int32_t primary_planes;
3245         enum dc_connection_type new_connection_type = dc_connection_none;
3246         const struct dc_plane_cap *plane;
3247
3248         link_cnt = dm->dc->caps.max_links;
3249         if (amdgpu_dm_mode_config_init(dm->adev)) {
3250                 DRM_ERROR("DM: Failed to initialize mode config\n");
3251                 return -EINVAL;
3252         }
3253
3254         /* There is one primary plane per CRTC */
3255         primary_planes = dm->dc->caps.max_streams;
3256         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3257
3258         /*
3259          * Initialize primary planes, implicit planes for legacy IOCTLs.
3260          * Order is reversed to match iteration order in atomic check.
3261          */
3262         for (i = (primary_planes - 1); i >= 0; i--) {
3263                 plane = &dm->dc->caps.planes[i];
3264
3265                 if (initialize_plane(dm, mode_info, i,
3266                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3267                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3268                         goto fail;
3269                 }
3270         }
3271
3272         /*
3273          * Initialize overlay planes, index starting after primary planes.
3274          * These planes have a higher DRM index than the primary planes since
3275          * they should be considered to have a higher z-order.
3276          * Order is reversed to match iteration order in atomic check.
3277          *
3278          * Only support DCN for now, and only expose one so we don't encourage
3279          * userspace to use up all the pipes.
3280          */
3281         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3282                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3283
3284                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3285                         continue;
3286
3287                 if (!plane->blends_with_above || !plane->blends_with_below)
3288                         continue;
3289
3290                 if (!plane->pixel_format_support.argb8888)
3291                         continue;
3292
3293                 if (initialize_plane(dm, NULL, primary_planes + i,
3294                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3295                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3296                         goto fail;
3297                 }
3298
3299                 /* Only create one overlay plane. */
3300                 break;
3301         }
3302
3303         for (i = 0; i < dm->dc->caps.max_streams; i++)
3304                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3305                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3306                         goto fail;
3307                 }
3308
3309         dm->display_indexes_num = dm->dc->caps.max_streams;
3310
3311         /* Loop over all connectors on the board */
3312         for (i = 0; i < link_cnt; i++) {
3313                 struct dc_link *link = NULL;
3314
3315                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3316                         DRM_ERROR(
3317                                 "KMS: Cannot support more than %d display indexes\n",
3318                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3319                         continue;
3320                 }
3321
3322                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3323                 if (!aconnector)
3324                         goto fail;
3325
3326                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3327                 if (!aencoder)
3328                         goto fail;
3329
3330                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3331                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3332                         goto fail;
3333                 }
3334
3335                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3336                         DRM_ERROR("KMS: Failed to initialize connector\n");
3337                         goto fail;
3338                 }
3339
3340                 link = dc_get_link_at_index(dm->dc, i);
3341
3342                 if (!dc_link_detect_sink(link, &new_connection_type))
3343                         DRM_ERROR("KMS: Failed to detect connector\n");
3344
3345                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3346                         emulated_link_detect(link);
3347                         amdgpu_dm_update_connector_after_detect(aconnector);
3348
3349                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3350                         amdgpu_dm_update_connector_after_detect(aconnector);
3351                         register_backlight_device(dm, link);
3352                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3353                                 amdgpu_dm_set_psr_caps(link);
3354                 }
3355
3357         }
3358
3359         /* Software is initialized. Now we can register interrupt handlers. */
3360         switch (adev->asic_type) {
3361 #if defined(CONFIG_DRM_AMD_DC_SI)
3362         case CHIP_TAHITI:
3363         case CHIP_PITCAIRN:
3364         case CHIP_VERDE:
3365         case CHIP_OLAND:
3366                 if (dce60_register_irq_handlers(dm->adev)) {
3367                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3368                         goto fail;
3369                 }
3370                 break;
3371 #endif
3372         case CHIP_BONAIRE:
3373         case CHIP_HAWAII:
3374         case CHIP_KAVERI:
3375         case CHIP_KABINI:
3376         case CHIP_MULLINS:
3377         case CHIP_TONGA:
3378         case CHIP_FIJI:
3379         case CHIP_CARRIZO:
3380         case CHIP_STONEY:
3381         case CHIP_POLARIS11:
3382         case CHIP_POLARIS10:
3383         case CHIP_POLARIS12:
3384         case CHIP_VEGAM:
3385         case CHIP_VEGA10:
3386         case CHIP_VEGA12:
3387         case CHIP_VEGA20:
3388                 if (dce110_register_irq_handlers(dm->adev)) {
3389                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3390                         goto fail;
3391                 }
3392                 break;
3393 #if defined(CONFIG_DRM_AMD_DC_DCN)
3394         case CHIP_RAVEN:
3395         case CHIP_NAVI12:
3396         case CHIP_NAVI10:
3397         case CHIP_NAVI14:
3398         case CHIP_RENOIR:
3399 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3400         case CHIP_SIENNA_CICHLID:
3401         case CHIP_NAVY_FLOUNDER:
3402 #endif
3403                 if (dcn10_register_irq_handlers(dm->adev)) {
3404                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3405                         goto fail;
3406                 }
3407                 break;
3408 #endif
3409         default:
3410                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3411                 goto fail;
3412         }
3413
3414         return 0;
3415 fail:
3416         kfree(aencoder);
3417         kfree(aconnector);
3418
3419         return -EINVAL;
3420 }
3421
3422 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3423 {
3424         drm_mode_config_cleanup(dm->ddev);
3425         drm_atomic_private_obj_fini(&dm->atomic_obj);
3427 }
3428
3429 /******************************************************************************
3430  * amdgpu_display_funcs functions
3431  *****************************************************************************/
3432
3433 /*
3434  * dm_bandwidth_update - program display watermarks
3435  *
3436  * @adev: amdgpu_device pointer
3437  *
3438  * Calculate and program the display watermarks and line buffer allocation.
3439  */
3440 static void dm_bandwidth_update(struct amdgpu_device *adev)
3441 {
3442         /* TODO: implement later */
3443 }
3444
3445 static const struct amdgpu_display_funcs dm_display_funcs = {
3446         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3447         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3448         .backlight_set_level = NULL, /* never called for DC */
3449         .backlight_get_level = NULL, /* never called for DC */
3450         .hpd_sense = NULL,/* called unconditionally */
3451         .hpd_set_polarity = NULL, /* called unconditionally */
3452         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3453         .page_flip_get_scanoutpos =
3454                 dm_crtc_get_scanoutpos,/* called unconditionally */
3455         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3456         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3457 };
3458
3459 #if defined(CONFIG_DEBUG_KERNEL_DC)
3460
3461 static ssize_t s3_debug_store(struct device *device,
3462                               struct device_attribute *attr,
3463                               const char *buf,
3464                               size_t count)
3465 {
3466         int ret;
3467         int s3_state;
3468         struct drm_device *drm_dev = dev_get_drvdata(device);
3469         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3470
3471         ret = kstrtoint(buf, 0, &s3_state);
3472
3473         if (ret == 0) {
3474                 if (s3_state) {
3475                         dm_resume(adev);
3476                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3477                 } else
3478                 } else {
3479                         dm_suspend(adev);
3480                 }
3480
3481         return ret == 0 ? count : 0;
3482 }
3483
3484 DEVICE_ATTR_WO(s3_debug);
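
/*
 * Usage sketch (hypothetical sysfs path, for illustration only):
 *
 *      # force a DM suspend/resume cycle from userspace
 *      echo 0 > /sys/devices/.../s3_debug    # dm_suspend()
 *      echo 1 > /sys/devices/.../s3_debug    # dm_resume() + hotplug event
 *
 * The attribute is write-only and exists only when CONFIG_DEBUG_KERNEL_DC
 * is set.
 */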
3485
3486 #endif
3487
3488 static int dm_early_init(void *handle)
3489 {
3490         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3491
3492         switch (adev->asic_type) {
3493 #if defined(CONFIG_DRM_AMD_DC_SI)
3494         case CHIP_TAHITI:
3495         case CHIP_PITCAIRN:
3496         case CHIP_VERDE:
3497                 adev->mode_info.num_crtc = 6;
3498                 adev->mode_info.num_hpd = 6;
3499                 adev->mode_info.num_dig = 6;
3500                 break;
3501         case CHIP_OLAND:
3502                 adev->mode_info.num_crtc = 2;
3503                 adev->mode_info.num_hpd = 2;
3504                 adev->mode_info.num_dig = 2;
3505                 break;
3506 #endif
3507         case CHIP_BONAIRE:
3508         case CHIP_HAWAII:
3509                 adev->mode_info.num_crtc = 6;
3510                 adev->mode_info.num_hpd = 6;
3511                 adev->mode_info.num_dig = 6;
3512                 break;
3513         case CHIP_KAVERI:
3514                 adev->mode_info.num_crtc = 4;
3515                 adev->mode_info.num_hpd = 6;
3516                 adev->mode_info.num_dig = 7;
3517                 break;
3518         case CHIP_KABINI:
3519         case CHIP_MULLINS:
3520                 adev->mode_info.num_crtc = 2;
3521                 adev->mode_info.num_hpd = 6;
3522                 adev->mode_info.num_dig = 6;
3523                 break;
3524         case CHIP_FIJI:
3525         case CHIP_TONGA:
3526                 adev->mode_info.num_crtc = 6;
3527                 adev->mode_info.num_hpd = 6;
3528                 adev->mode_info.num_dig = 7;
3529                 break;
3530         case CHIP_CARRIZO:
3531                 adev->mode_info.num_crtc = 3;
3532                 adev->mode_info.num_hpd = 6;
3533                 adev->mode_info.num_dig = 9;
3534                 break;
3535         case CHIP_STONEY:
3536                 adev->mode_info.num_crtc = 2;
3537                 adev->mode_info.num_hpd = 6;
3538                 adev->mode_info.num_dig = 9;
3539                 break;
3540         case CHIP_POLARIS11:
3541         case CHIP_POLARIS12:
3542                 adev->mode_info.num_crtc = 5;
3543                 adev->mode_info.num_hpd = 5;
3544                 adev->mode_info.num_dig = 5;
3545                 break;
3546         case CHIP_POLARIS10:
3547         case CHIP_VEGAM:
3548                 adev->mode_info.num_crtc = 6;
3549                 adev->mode_info.num_hpd = 6;
3550                 adev->mode_info.num_dig = 6;
3551                 break;
3552         case CHIP_VEGA10:
3553         case CHIP_VEGA12:
3554         case CHIP_VEGA20:
3555                 adev->mode_info.num_crtc = 6;
3556                 adev->mode_info.num_hpd = 6;
3557                 adev->mode_info.num_dig = 6;
3558                 break;
3559 #if defined(CONFIG_DRM_AMD_DC_DCN)
3560         case CHIP_RAVEN:
3561                 adev->mode_info.num_crtc = 4;
3562                 adev->mode_info.num_hpd = 4;
3563                 adev->mode_info.num_dig = 4;
3564                 break;
3565 #endif
3566         case CHIP_NAVI10:
3567         case CHIP_NAVI12:
3568 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3569         case CHIP_SIENNA_CICHLID:
3570         case CHIP_NAVY_FLOUNDER:
3571 #endif
3572                 adev->mode_info.num_crtc = 6;
3573                 adev->mode_info.num_hpd = 6;
3574                 adev->mode_info.num_dig = 6;
3575                 break;
3576         case CHIP_NAVI14:
3577                 adev->mode_info.num_crtc = 5;
3578                 adev->mode_info.num_hpd = 5;
3579                 adev->mode_info.num_dig = 5;
3580                 break;
3581         case CHIP_RENOIR:
3582                 adev->mode_info.num_crtc = 4;
3583                 adev->mode_info.num_hpd = 4;
3584                 adev->mode_info.num_dig = 4;
3585                 break;
3586         default:
3587                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3588                 return -EINVAL;
3589         }
3590
3591         amdgpu_dm_set_irq_funcs(adev);
3592
3593         if (adev->mode_info.funcs == NULL)
3594                 adev->mode_info.funcs = &dm_display_funcs;
3595
3596         /*
3597          * Note: Do NOT change adev->audio_endpt_rreg and
3598          * adev->audio_endpt_wreg because they are initialised in
3599          * amdgpu_device_init()
3600          */
3601 #if defined(CONFIG_DEBUG_KERNEL_DC)
3602         device_create_file(
3603                 adev_to_drm(adev)->dev,
3604                 &dev_attr_s3_debug);
3605 #endif
3606
3607         return 0;
3608 }
3609
3610 static bool modeset_required(struct drm_crtc_state *crtc_state,
3611                              struct dc_stream_state *new_stream,
3612                              struct dc_stream_state *old_stream)
3613 {
3614         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3615 }
3616
3617 static bool modereset_required(struct drm_crtc_state *crtc_state)
3618 {
3619         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3620 }
3621
3622 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3623 {
3624         drm_encoder_cleanup(encoder);
3625         kfree(encoder);
3626 }
3627
3628 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3629         .destroy = amdgpu_dm_encoder_destroy,
3630 };
3631
3632
3633 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3634                                 struct dc_scaling_info *scaling_info)
3635 {
3636         int scale_w, scale_h;
3637
3638         memset(scaling_info, 0, sizeof(*scaling_info));
3639
3640         /* Source is in 16.16 fixed point; ignore the fractional part for now. */
3641         scaling_info->src_rect.x = state->src_x >> 16;
3642         scaling_info->src_rect.y = state->src_y >> 16;
3643
3644         scaling_info->src_rect.width = state->src_w >> 16;
3645         if (scaling_info->src_rect.width == 0)
3646                 return -EINVAL;
3647
3648         scaling_info->src_rect.height = state->src_h >> 16;
3649         if (scaling_info->src_rect.height == 0)
3650                 return -EINVAL;
3651
3652         scaling_info->dst_rect.x = state->crtc_x;
3653         scaling_info->dst_rect.y = state->crtc_y;
3654
3655         if (state->crtc_w == 0)
3656                 return -EINVAL;
3657
3658         scaling_info->dst_rect.width = state->crtc_w;
3659
3660         if (state->crtc_h == 0)
3661                 return -EINVAL;
3662
3663         scaling_info->dst_rect.height = state->crtc_h;
3664
3665         /* DRM doesn't specify clipping on destination output. */
3666         scaling_info->clip_rect = scaling_info->dst_rect;
3667
3668         /* TODO: Validate scaling per-format with DC plane caps */
3669         scale_w = scaling_info->dst_rect.width * 1000 /
3670                   scaling_info->src_rect.width;
3671
3672         if (scale_w < 250 || scale_w > 16000)
3673                 return -EINVAL;
3674
3675         scale_h = scaling_info->dst_rect.height * 1000 /
3676                   scaling_info->src_rect.height;
3677
3678         if (scale_h < 250 || scale_h > 16000)
3679                 return -EINVAL;
3680
3681         /*
3682          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3683          * assume reasonable defaults based on the format.
3684          */
3685
3686         return 0;
3687 }
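
/*
 * Worked example (illustrative): a 1920x1080 source rect arrives as
 * src_w = 1920 << 16 in DRM's 16.16 fixed point, so src_rect.width becomes
 * 1920 after the >> 16 above. Scaling that to a 960x540 destination gives
 *
 *      scale_w = 960 * 1000 / 1920 = 500
 *      scale_h = 540 * 1000 / 1080 = 500
 *
 * i.e. 0.5x, comfortably inside the accepted 250..16000 window
 * (0.25x downscale to 16x upscale).
 */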
3688
3689 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3690                        uint64_t *tiling_flags, bool *tmz_surface)
3691 {
3692         struct amdgpu_bo *rbo;
3693         int r;
3694
3695         if (!amdgpu_fb) {
3696                 *tiling_flags = 0;
3697                 *tmz_surface = false;
3698                 return 0;
3699         }
3700
3701         rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3702         r = amdgpu_bo_reserve(rbo, false);
3703
3704         if (unlikely(r)) {
3705                 /* Don't show error message when returning -ERESTARTSYS */
3706                 if (r != -ERESTARTSYS)
3707                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3708                 return r;
3709         }
3710
3711         if (tiling_flags)
3712                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3713
3714         if (tmz_surface)
3715                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3716
3717         amdgpu_bo_unreserve(rbo);
3718
3719         return r;
3720 }
3721
3722 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3723 {
3724         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3725
3726         return offset ? (address + offset * 256) : 0;
3727 }
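
/*
 * Example (illustrative): DCC_OFFSET_256B stores the metadata offset in
 * 256-byte units, so a tiling value of 0x400 places the DCC metadata at
 * address + 0x400 * 256 = address + 256 KiB; an offset of 0 means the
 * surface has no DCC and the function returns 0.
 */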
3728
3729 static int
3730 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3731                           const struct amdgpu_framebuffer *afb,
3732                           const enum surface_pixel_format format,
3733                           const enum dc_rotation_angle rotation,
3734                           const struct plane_size *plane_size,
3735                           const union dc_tiling_info *tiling_info,
3736                           const uint64_t info,
3737                           struct dc_plane_dcc_param *dcc,
3738                           struct dc_plane_address *address,
3739                           bool force_disable_dcc)
3740 {
3741         struct dc *dc = adev->dm.dc;
3742         struct dc_dcc_surface_param input;
3743         struct dc_surface_dcc_cap output;
3744         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3745         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3746         uint64_t dcc_address;
3747
3748         memset(&input, 0, sizeof(input));
3749         memset(&output, 0, sizeof(output));
3750
3751         if (force_disable_dcc)
3752                 return 0;
3753
3754         if (!offset)
3755                 return 0;
3756
3757         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3758                 return 0;
3759
3760         if (!dc->cap_funcs.get_dcc_compression_cap)
3761                 return -EINVAL;
3762
3763         input.format = format;
3764         input.surface_size.width = plane_size->surface_size.width;
3765         input.surface_size.height = plane_size->surface_size.height;
3766         input.swizzle_mode = tiling_info->gfx9.swizzle;
3767
3768         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3769                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3770         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3771                 input.scan = SCAN_DIRECTION_VERTICAL;
3772
3773         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3774                 return -EINVAL;
3775
3776         if (!output.capable)
3777                 return -EINVAL;
3778
3779         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3780                 return -EINVAL;
3781
3782         dcc->enable = 1;
3783         dcc->meta_pitch =
3784                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3785         dcc->independent_64b_blks = i64b;
3786
3787         dcc_address = get_dcc_address(afb->address, info);
3788         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3789         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3790
3791         return 0;
3792 }
3793
3794 static int
3795 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3796                              const struct amdgpu_framebuffer *afb,
3797                              const enum surface_pixel_format format,
3798                              const enum dc_rotation_angle rotation,
3799                              const uint64_t tiling_flags,
3800                              union dc_tiling_info *tiling_info,
3801                              struct plane_size *plane_size,
3802                              struct dc_plane_dcc_param *dcc,
3803                              struct dc_plane_address *address,
3804                              bool tmz_surface,
3805                              bool force_disable_dcc)
3806 {
3807         const struct drm_framebuffer *fb = &afb->base;
3808         int ret;
3809
3810         memset(tiling_info, 0, sizeof(*tiling_info));
3811         memset(plane_size, 0, sizeof(*plane_size));
3812         memset(dcc, 0, sizeof(*dcc));
3813         memset(address, 0, sizeof(*address));
3814
3815         address->tmz_surface = tmz_surface;
3816
3817         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3818                 plane_size->surface_size.x = 0;
3819                 plane_size->surface_size.y = 0;
3820                 plane_size->surface_size.width = fb->width;
3821                 plane_size->surface_size.height = fb->height;
3822                 plane_size->surface_pitch =
3823                         fb->pitches[0] / fb->format->cpp[0];
3824
3825                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3826                 address->grph.addr.low_part = lower_32_bits(afb->address);
3827                 address->grph.addr.high_part = upper_32_bits(afb->address);
3828         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3829                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3830
3831                 plane_size->surface_size.x = 0;
3832                 plane_size->surface_size.y = 0;
3833                 plane_size->surface_size.width = fb->width;
3834                 plane_size->surface_size.height = fb->height;
3835                 plane_size->surface_pitch =
3836                         fb->pitches[0] / fb->format->cpp[0];
3837
3838                 plane_size->chroma_size.x = 0;
3839                 plane_size->chroma_size.y = 0;
3840                 /* TODO: set these based on surface format */
3841                 plane_size->chroma_size.width = fb->width / 2;
3842                 plane_size->chroma_size.height = fb->height / 2;
3843
3844                 plane_size->chroma_pitch =
3845                         fb->pitches[1] / fb->format->cpp[1];
3846
3847                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3848                 address->video_progressive.luma_addr.low_part =
3849                         lower_32_bits(afb->address);
3850                 address->video_progressive.luma_addr.high_part =
3851                         upper_32_bits(afb->address);
3852                 address->video_progressive.chroma_addr.low_part =
3853                         lower_32_bits(chroma_addr);
3854                 address->video_progressive.chroma_addr.high_part =
3855                         upper_32_bits(chroma_addr);
3856         }
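
        /*
         * Illustrative NV12 case (assumed layout): for a 1920x1080 NV12
         * framebuffer the luma plane is 1920x1080 with cpp[0] = 1, and the
         * interleaved CbCr plane is 960x540 with cpp[1] = 2, so
         * surface_pitch = pitches[0] / 1 and chroma_pitch = pitches[1] / 2,
         * with the chroma plane starting at afb->address + fb->offsets[1].
         */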
3857
3858         /* Fill GFX8 params */
3859         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3860                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3861
3862                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3863                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3864                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3865                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3866                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3867
3868                 /* XXX fix me for VI */
3869                 tiling_info->gfx8.num_banks = num_banks;
3870                 tiling_info->gfx8.array_mode =
3871                                 DC_ARRAY_2D_TILED_THIN1;
3872                 tiling_info->gfx8.tile_split = tile_split;
3873                 tiling_info->gfx8.bank_width = bankw;
3874                 tiling_info->gfx8.bank_height = bankh;
3875                 tiling_info->gfx8.tile_aspect = mtaspect;
3876                 tiling_info->gfx8.tile_mode =
3877                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3878         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3879                         == DC_ARRAY_1D_TILED_THIN1) {
3880                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3881         }
3882
3883         tiling_info->gfx8.pipe_config =
3884                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3885
3886         if (adev->asic_type == CHIP_VEGA10 ||
3887             adev->asic_type == CHIP_VEGA12 ||
3888             adev->asic_type == CHIP_VEGA20 ||
3889             adev->asic_type == CHIP_NAVI10 ||
3890             adev->asic_type == CHIP_NAVI14 ||
3891             adev->asic_type == CHIP_NAVI12 ||
3892 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3893                 adev->asic_type == CHIP_SIENNA_CICHLID ||
3894                 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3895 #endif
3896             adev->asic_type == CHIP_RENOIR ||
3897             adev->asic_type == CHIP_RAVEN) {
3898                 /* Fill GFX9 params */
3899                 tiling_info->gfx9.num_pipes =
3900                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3901                 tiling_info->gfx9.num_banks =
3902                         adev->gfx.config.gb_addr_config_fields.num_banks;
3903                 tiling_info->gfx9.pipe_interleave =
3904                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3905                 tiling_info->gfx9.num_shader_engines =
3906                         adev->gfx.config.gb_addr_config_fields.num_se;
3907                 tiling_info->gfx9.max_compressed_frags =
3908                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3909                 tiling_info->gfx9.num_rb_per_se =
3910                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3911                 tiling_info->gfx9.swizzle =
3912                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3913                 tiling_info->gfx9.shaderEnable = 1;
3914
3915 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3916                 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3917                     adev->asic_type == CHIP_NAVY_FLOUNDER)
3918                         tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3919 #endif
3920                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3921                                                 plane_size, tiling_info,
3922                                                 tiling_flags, dcc, address,
3923                                                 force_disable_dcc);
3924                 if (ret)
3925                         return ret;
3926         }
3927
3928         return 0;
3929 }
3930
3931 static void
3932 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3933                                bool *per_pixel_alpha, bool *global_alpha,
3934                                int *global_alpha_value)
3935 {
3936         *per_pixel_alpha = false;
3937         *global_alpha = false;
3938         *global_alpha_value = 0xff;
3939
3940         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3941                 return;
3942
3943         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3944                 static const uint32_t alpha_formats[] = {
3945                         DRM_FORMAT_ARGB8888,
3946                         DRM_FORMAT_RGBA8888,
3947                         DRM_FORMAT_ABGR8888,
3948                 };
3949                 uint32_t format = plane_state->fb->format->format;
3950                 unsigned int i;
3951
3952                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3953                         if (format == alpha_formats[i]) {
3954                                 *per_pixel_alpha = true;
3955                                 break;
3956                         }
3957                 }
3958         }
3959
3960         if (plane_state->alpha < 0xffff) {
3961                 *global_alpha = true;
3962                 *global_alpha_value = plane_state->alpha >> 8;
3963         }
3964 }
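
/*
 * Worked example (illustrative): the DRM plane "alpha" property is 16-bit,
 * so a userspace value of 0x8000 (~50%) yields global_alpha = true and
 * global_alpha_value = 0x8000 >> 8 = 0x80, while the default 0xffff leaves
 * global blending disabled.
 */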
3965
3966 static int
3967 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3968                             const enum surface_pixel_format format,
3969                             enum dc_color_space *color_space)
3970 {
3971         bool full_range;
3972
3973         *color_space = COLOR_SPACE_SRGB;
3974
3975         /* DRM color properties only affect non-RGB formats. */
3976         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3977                 return 0;
3978
3979         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3980
3981         switch (plane_state->color_encoding) {
3982         case DRM_COLOR_YCBCR_BT601:
3983                 if (full_range)
3984                         *color_space = COLOR_SPACE_YCBCR601;
3985                 else
3986                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3987                 break;
3988
3989         case DRM_COLOR_YCBCR_BT709:
3990                 if (full_range)
3991                         *color_space = COLOR_SPACE_YCBCR709;
3992                 else
3993                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3994                 break;
3995
3996         case DRM_COLOR_YCBCR_BT2020:
3997                 if (full_range)
3998                         *color_space = COLOR_SPACE_2020_YCBCR;
3999                 else
4000                         return -EINVAL;
4001                 break;
4002
4003         default:
4004                 return -EINVAL;
4005         }
4006
4007         return 0;
4008 }
4009
4010 static int
4011 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4012                             const struct drm_plane_state *plane_state,
4013                             const uint64_t tiling_flags,
4014                             struct dc_plane_info *plane_info,
4015                             struct dc_plane_address *address,
4016                             bool tmz_surface,
4017                             bool force_disable_dcc)
4018 {
4019         const struct drm_framebuffer *fb = plane_state->fb;
4020         const struct amdgpu_framebuffer *afb =
4021                 to_amdgpu_framebuffer(plane_state->fb);
4022         struct drm_format_name_buf format_name;
4023         int ret;
4024
4025         memset(plane_info, 0, sizeof(*plane_info));
4026
4027         switch (fb->format->format) {
4028         case DRM_FORMAT_C8:
4029                 plane_info->format =
4030                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4031                 break;
4032         case DRM_FORMAT_RGB565:
4033                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4034                 break;
4035         case DRM_FORMAT_XRGB8888:
4036         case DRM_FORMAT_ARGB8888:
4037                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4038                 break;
4039         case DRM_FORMAT_XRGB2101010:
4040         case DRM_FORMAT_ARGB2101010:
4041                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4042                 break;
4043         case DRM_FORMAT_XBGR2101010:
4044         case DRM_FORMAT_ABGR2101010:
4045                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4046                 break;
4047         case DRM_FORMAT_XBGR8888:
4048         case DRM_FORMAT_ABGR8888:
4049                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4050                 break;
4051         case DRM_FORMAT_NV21:
4052                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4053                 break;
4054         case DRM_FORMAT_NV12:
4055                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4056                 break;
4057         case DRM_FORMAT_P010:
4058                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4059                 break;
4060         case DRM_FORMAT_XRGB16161616F:
4061         case DRM_FORMAT_ARGB16161616F:
4062                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4063                 break;
4064         case DRM_FORMAT_XBGR16161616F:
4065         case DRM_FORMAT_ABGR16161616F:
4066                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4067                 break;
4068         default:
4069                 DRM_ERROR(
4070                         "Unsupported screen format %s\n",
4071                         drm_get_format_name(fb->format->format, &format_name));
4072                 return -EINVAL;
4073         }
4074
4075         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4076         case DRM_MODE_ROTATE_0:
4077                 plane_info->rotation = ROTATION_ANGLE_0;
4078                 break;
4079         case DRM_MODE_ROTATE_90:
4080                 plane_info->rotation = ROTATION_ANGLE_90;
4081                 break;
4082         case DRM_MODE_ROTATE_180:
4083                 plane_info->rotation = ROTATION_ANGLE_180;
4084                 break;
4085         case DRM_MODE_ROTATE_270:
4086                 plane_info->rotation = ROTATION_ANGLE_270;
4087                 break;
4088         default:
4089                 plane_info->rotation = ROTATION_ANGLE_0;
4090                 break;
4091         }
4092
4093         plane_info->visible = true;
4094         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4095
4096         plane_info->layer_index = 0;
4097
4098         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4099                                           &plane_info->color_space);
4100         if (ret)
4101                 return ret;
4102
4103         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4104                                            plane_info->rotation, tiling_flags,
4105                                            &plane_info->tiling_info,
4106                                            &plane_info->plane_size,
4107                                            &plane_info->dcc, address, tmz_surface,
4108                                            force_disable_dcc);
4109         if (ret)
4110                 return ret;
4111
4112         fill_blending_from_plane_state(
4113                 plane_state, &plane_info->per_pixel_alpha,
4114                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4115
4116         return 0;
4117 }
4118
4119 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4120                                     struct dc_plane_state *dc_plane_state,
4121                                     struct drm_plane_state *plane_state,
4122                                     struct drm_crtc_state *crtc_state)
4123 {
4124         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4125         struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4126         struct dc_scaling_info scaling_info;
4127         struct dc_plane_info plane_info;
4128         int ret;
4129         bool force_disable_dcc = false;
4130
4131         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4132         if (ret)
4133                 return ret;
4134
4135         dc_plane_state->src_rect = scaling_info.src_rect;
4136         dc_plane_state->dst_rect = scaling_info.dst_rect;
4137         dc_plane_state->clip_rect = scaling_info.clip_rect;
4138         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4139
4140         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4141         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4142                                           dm_plane_state->tiling_flags,
4143                                           &plane_info,
4144                                           &dc_plane_state->address,
4145                                           dm_plane_state->tmz_surface,
4146                                           force_disable_dcc);
4147         if (ret)
4148                 return ret;
4149
4150         dc_plane_state->format = plane_info.format;
4151         dc_plane_state->color_space = plane_info.color_space;
4153         dc_plane_state->plane_size = plane_info.plane_size;
4154         dc_plane_state->rotation = plane_info.rotation;
4155         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4156         dc_plane_state->stereo_format = plane_info.stereo_format;
4157         dc_plane_state->tiling_info = plane_info.tiling_info;
4158         dc_plane_state->visible = plane_info.visible;
4159         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4160         dc_plane_state->global_alpha = plane_info.global_alpha;
4161         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4162         dc_plane_state->dcc = plane_info.dcc;
4163         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4164
4165         /*
4166          * Always set input transfer function, since plane state is refreshed
4167          * every time.
4168          */
4169         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4170         if (ret)
4171                 return ret;
4172
4173         return 0;
4174 }
4175
4176 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4177                                            const struct dm_connector_state *dm_state,
4178                                            struct dc_stream_state *stream)
4179 {
4180         enum amdgpu_rmx_type rmx_type;
4181
4182         struct rect src = { 0 }; /* viewport in composition space */
4183         struct rect dst = { 0 }; /* stream addressable area */
4184
4185         /* no mode. nothing to be done */
4186         if (!mode)
4187                 return;
4188
4189         /* Full screen scaling by default */
4190         src.width = mode->hdisplay;
4191         src.height = mode->vdisplay;
4192         dst.width = stream->timing.h_addressable;
4193         dst.height = stream->timing.v_addressable;
4194
4195         if (dm_state) {
4196                 rmx_type = dm_state->scaling;
4197                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4198                         if (src.width * dst.height <
4199                                         src.height * dst.width) {
4200                                 /* height needs less upscaling/more downscaling */
4201                                 dst.width = src.width *
4202                                                 dst.height / src.height;
4203                         } else {
4204                                 /* width needs less upscaling/more downscaling */
4205                                 dst.height = src.height *
4206                                                 dst.width / src.width;
4207                         }
4208                 } else if (rmx_type == RMX_CENTER) {
4209                         dst = src;
4210                 }
4211
4212                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4213                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4214
4215                 if (dm_state->underscan_enable) {
4216                         dst.x += dm_state->underscan_hborder / 2;
4217                         dst.y += dm_state->underscan_vborder / 2;
4218                         dst.width -= dm_state->underscan_hborder;
4219                         dst.height -= dm_state->underscan_vborder;
4220                 }
4221         }
4222
4223         stream->src = src;
4224         stream->dst = dst;
4225
4226         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4227                         dst.x, dst.y, dst.width, dst.height);
4228
4229 }
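
/*
 * Illustrative sketch, not part of the driver build: a worked example of the
 * RMX_ASPECT math above. For a 1920x1080 source on a 1280x1024 panel,
 * 1920 * 1024 > 1080 * 1280, so the "width needs less upscaling" branch is
 * taken and the height is recomputed: 1080 * 1280 / 1920 = 720. The result
 * is a centered 1280x720 rect with dst.y = (1024 - 720) / 2 = 152
 * (letterboxing).
 */
#if 0 /* example only */
static void example_rmx_aspect(void)
{
        struct rect src = { .width = 1920, .height = 1080 };
        struct rect dst = { .width = 1280, .height = 1024 };

        if (src.width * dst.height < src.height * dst.width)
                dst.width = src.width * dst.height / src.height;
        else
                dst.height = src.height * dst.width / src.width; /* 720 */

        dst.x = (1280 - dst.width) / 2;  /* 0 */
        dst.y = (1024 - dst.height) / 2; /* 152 */
}
#endif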
4230
4231 static enum dc_color_depth
4232 convert_color_depth_from_display_info(const struct drm_connector *connector,
4233                                       bool is_y420, int requested_bpc)
4234 {
4235         uint8_t bpc;
4236
4237         if (is_y420) {
4238                 bpc = 8;
4239
4240                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4241                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4242                         bpc = 16;
4243                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4244                         bpc = 12;
4245                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4246                         bpc = 10;
4247         } else {
4248                 bpc = (uint8_t)connector->display_info.bpc;
4249                 /* Assume 8 bpc by default if no bpc is specified. */
4250                 bpc = bpc ? bpc : 8;
4251         }
4252
4253         if (requested_bpc > 0) {
4254                 /*
4255                  * Cap display bpc based on the user requested value.
4256                  *
4257                  * The value for state->max_bpc may not be correctly updated
4258                  * depending on when the connector gets added to the state
4259                  * or if this was called outside of atomic check, so it
4260                  * can't be used directly.
4261                  */
4262                 bpc = min_t(u8, bpc, requested_bpc);
4263
4264                 /* Round down to the nearest even number. */
4265                 bpc = bpc - (bpc & 1);
4266         }
4267
4268         switch (bpc) {
4269         case 0:
4270                 /*
4271                  * Temporary workaround: DRM doesn't parse color depth for
4272                  * EDID revisions before 1.4.
4273                  * TODO: Fix EDID parsing.
4274                  */
4275                 return COLOR_DEPTH_888;
4276         case 6:
4277                 return COLOR_DEPTH_666;
4278         case 8:
4279                 return COLOR_DEPTH_888;
4280         case 10:
4281                 return COLOR_DEPTH_101010;
4282         case 12:
4283                 return COLOR_DEPTH_121212;
4284         case 14:
4285                 return COLOR_DEPTH_141414;
4286         case 16:
4287                 return COLOR_DEPTH_161616;
4288         default:
4289                 return COLOR_DEPTH_UNDEFINED;
4290         }
4291 }
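
/*
 * Illustrative sketch, not part of the driver build: how the capping above
 * composes. With a 12 bpc panel and a user-requested max of 11,
 * min(12, 11) = 11 is rounded down to the even value 10, which the switch
 * statement maps to COLOR_DEPTH_101010.
 */
#if 0 /* example only */
static void example_cap_bpc(void)
{
        u8 bpc = 12;            /* panel capability from the EDID */
        int requested_bpc = 11; /* user-requested "max bpc" connector property */

        bpc = min_t(u8, bpc, requested_bpc); /* 11 */
        bpc = bpc - (bpc & 1);               /* round down to even: 10 */
}
#endif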
4292
4293 static enum dc_aspect_ratio
4294 get_aspect_ratio(const struct drm_display_mode *mode_in)
4295 {
4296         /* 1-1 mapping, since both enums follow the HDMI spec. */
4297         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4298 }
4299
4300 static enum dc_color_space
4301 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4302 {
4303         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4304
4305         switch (dc_crtc_timing->pixel_encoding) {
4306         case PIXEL_ENCODING_YCBCR422:
4307         case PIXEL_ENCODING_YCBCR444:
4308         case PIXEL_ENCODING_YCBCR420:
4309         {
4310                 /*
4311                  * 27.03 MHz is the separation point between HDTV and SDTV.
4312                  * Per the HDMI spec we use YCbCr709 above it and YCbCr601
4313                  * below it.
4314                  */
4315                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4316                         if (dc_crtc_timing->flags.Y_ONLY)
4317                                 color_space =
4318                                         COLOR_SPACE_YCBCR709_LIMITED;
4319                         else
4320                                 color_space = COLOR_SPACE_YCBCR709;
4321                 } else {
4322                         if (dc_crtc_timing->flags.Y_ONLY)
4323                                 color_space =
4324                                         COLOR_SPACE_YCBCR601_LIMITED;
4325                         else
4326                                 color_space = COLOR_SPACE_YCBCR601;
4327                 }
4328
4329         }
4330         break;
4331         case PIXEL_ENCODING_RGB:
4332                 color_space = COLOR_SPACE_SRGB;
4333                 break;
4334
4335         default:
4336                 WARN_ON(1);
4337                 break;
4338         }
4339
4340         return color_space;
4341 }
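
/*
 * Illustrative sketch, not part of the driver build: pix_clk_100hz is in
 * units of 100 Hz, so the 270300 threshold above is 27.03 MHz. A 1080p60
 * YCbCr stream at 148.5 MHz (pix_clk_100hz = 1485000) selects
 * COLOR_SPACE_YCBCR709, while a 480p stream at 27 MHz
 * (pix_clk_100hz = 270000) selects COLOR_SPACE_YCBCR601.
 */
#if 0 /* example only */
static bool example_is_hdtv(const struct dc_crtc_timing *timing)
{
        return timing->pix_clk_100hz > 270300; /* > 27.03 MHz -> BT.709 */
}
#endif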
4342
4343 static bool adjust_colour_depth_from_display_info(
4344         struct dc_crtc_timing *timing_out,
4345         const struct drm_display_info *info)
4346 {
4347         enum dc_color_depth depth = timing_out->display_color_depth;
4348         int normalized_clk;
4349         do {
4350                 normalized_clk = timing_out->pix_clk_100hz / 10;
4351                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4352                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4353                         normalized_clk /= 2;
4354                 /* Adjust pixel clock per the HDMI spec based on colour depth */
4355                 switch (depth) {
4356                 case COLOR_DEPTH_888:
4357                         break;
4358                 case COLOR_DEPTH_101010:
4359                         normalized_clk = (normalized_clk * 30) / 24;
4360                         break;
4361                 case COLOR_DEPTH_121212:
4362                         normalized_clk = (normalized_clk * 36) / 24;
4363                         break;
4364                 case COLOR_DEPTH_161616:
4365                         normalized_clk = (normalized_clk * 48) / 24;
4366                         break;
4367                 default:
4368                         /* The above depths are the only ones valid for HDMI. */
4369                         return false;
4370                 }
4371                 if (normalized_clk <= info->max_tmds_clock) {
4372                         timing_out->display_color_depth = depth;
4373                         return true;
4374                 }
4375         } while (--depth > COLOR_DEPTH_666);
4376         return false;
4377 }
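
/*
 * Illustrative sketch, not part of the driver build: the loop above, worked
 * for 4K60 (pix_clk_100hz = 5940000, i.e. 594 MHz) on an HDMI 2.0 sink with
 * max_tmds_clock = 600000 kHz. At 10 bpc the normalized clock is
 * 594000 * 30 / 24 = 742500 kHz > 600000, so the loop steps down to 8 bpc,
 * where 594000 <= 600000 fits. With YCbCr 4:2:0 the clock is halved to
 * 297000 kHz first, so even 12 bpc (297000 * 36 / 24 = 445500 kHz) would fit.
 */
#if 0 /* example only */
static void example_tmds_headroom(void)
{
        int normalized_clk = 5940000 / 10;        /* 594000 kHz, RGB 4:4:4 */
        int clk_10bpc = normalized_clk * 30 / 24; /* 742500 kHz: too fast  */
        int clk_8bpc = normalized_clk;            /* 594000 kHz: fits      */
}
#endif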
4378
4379 static void fill_stream_properties_from_drm_display_mode(
4380         struct dc_stream_state *stream,
4381         const struct drm_display_mode *mode_in,
4382         const struct drm_connector *connector,
4383         const struct drm_connector_state *connector_state,
4384         const struct dc_stream_state *old_stream,
4385         int requested_bpc)
4386 {
4387         struct dc_crtc_timing *timing_out = &stream->timing;
4388         const struct drm_display_info *info = &connector->display_info;
4389         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4390         struct hdmi_vendor_infoframe hv_frame;
4391         struct hdmi_avi_infoframe avi_frame;
4392
4393         memset(&hv_frame, 0, sizeof(hv_frame));
4394         memset(&avi_frame, 0, sizeof(avi_frame));
4395
4396         timing_out->h_border_left = 0;
4397         timing_out->h_border_right = 0;
4398         timing_out->v_border_top = 0;
4399         timing_out->v_border_bottom = 0;
4400         /* TODO: un-hardcode */
4401         if (drm_mode_is_420_only(info, mode_in)
4402                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4403                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4404         else if (drm_mode_is_420_also(info, mode_in)
4405                         && aconnector->force_yuv420_output)
4406                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4407         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4408                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4409                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4410         else
4411                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4412
4413         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4414         timing_out->display_color_depth = convert_color_depth_from_display_info(
4415                 connector,
4416                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4417                 requested_bpc);
4418         timing_out->scan_type = SCANNING_TYPE_NODATA;
4419         timing_out->hdmi_vic = 0;
4420
4421         if (old_stream) {
4422                 timing_out->vic = old_stream->timing.vic;
4423                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4424                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4425         } else {
4426                 timing_out->vic = drm_match_cea_mode(mode_in);
4427                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4428                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4429                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4430                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4431         }
4432
4433         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4434                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4435                 timing_out->vic = avi_frame.video_code;
4436                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4437                 timing_out->hdmi_vic = hv_frame.vic;
4438         }
4439
4440         timing_out->h_addressable = mode_in->crtc_hdisplay;
4441         timing_out->h_total = mode_in->crtc_htotal;
4442         timing_out->h_sync_width =
4443                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4444         timing_out->h_front_porch =
4445                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4446         timing_out->v_total = mode_in->crtc_vtotal;
4447         timing_out->v_addressable = mode_in->crtc_vdisplay;
4448         timing_out->v_front_porch =
4449                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4450         timing_out->v_sync_width =
4451                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4452         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4453         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4454
4455         stream->output_color_space = get_output_color_space(timing_out);
4456
4457         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4458         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4459         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4460                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4461                     drm_mode_is_420_also(info, mode_in) &&
4462                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4463                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4464                         adjust_colour_depth_from_display_info(timing_out, info);
4465                 }
4466         }
4467 }
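
/*
 * Illustrative sketch, not part of the driver build: how the porch/sync math
 * above decomposes a DRM mode. For CEA 1080p60 (clock 148500 kHz,
 * hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200):
 *   h_front_porch = 2008 - 1920 = 88
 *   h_sync_width  = 2052 - 2008 = 44
 *   pix_clk_100hz = 148500 * 10 = 1485000 (148.5 MHz)
 */
#if 0 /* example only */
static void example_timing_decompose(const struct drm_display_mode *m)
{
        int h_front_porch = m->crtc_hsync_start - m->crtc_hdisplay;
        int h_sync_width = m->crtc_hsync_end - m->crtc_hsync_start;
        int pix_clk_100hz = m->crtc_clock * 10; /* crtc_clock is in kHz */
}
#endif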
4468
4469 static void fill_audio_info(struct audio_info *audio_info,
4470                             const struct drm_connector *drm_connector,
4471                             const struct dc_sink *dc_sink)
4472 {
4473         int i = 0;
4474         int cea_revision = 0;
4475         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4476
4477         audio_info->manufacture_id = edid_caps->manufacturer_id;
4478         audio_info->product_id = edid_caps->product_id;
4479
4480         cea_revision = drm_connector->display_info.cea_rev;
4481
4482         strscpy(audio_info->display_name,
4483                 edid_caps->display_name,
4484                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4485
4486         if (cea_revision >= 3) {
4487                 audio_info->mode_count = edid_caps->audio_mode_count;
4488
4489                 for (i = 0; i < audio_info->mode_count; ++i) {
4490                         audio_info->modes[i].format_code =
4491                                         (enum audio_format_code)
4492                                         (edid_caps->audio_modes[i].format_code);
4493                         audio_info->modes[i].channel_count =
4494                                         edid_caps->audio_modes[i].channel_count;
4495                         audio_info->modes[i].sample_rates.all =
4496                                         edid_caps->audio_modes[i].sample_rate;
4497                         audio_info->modes[i].sample_size =
4498                                         edid_caps->audio_modes[i].sample_size;
4499                 }
4500         }
4501
4502         audio_info->flags.all = edid_caps->speaker_flags;
4503
4504         /* TODO: We only check for the progressive mode, check for interlace mode too */
4505         if (drm_connector->latency_present[0]) {
4506                 audio_info->video_latency = drm_connector->video_latency[0];
4507                 audio_info->audio_latency = drm_connector->audio_latency[0];
4508         }
4509
4510         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4511
4512 }
4513
4514 static void
4515 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4516                                       struct drm_display_mode *dst_mode)
4517 {
4518         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4519         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4520         dst_mode->crtc_clock = src_mode->crtc_clock;
4521         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4522         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4523         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4524         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4525         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4526         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4527         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4528         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4529         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4530         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4531         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4532 }
4533
4534 static void
4535 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4536                                         const struct drm_display_mode *native_mode,
4537                                         bool scale_enabled)
4538 {
4539         if (scale_enabled) {
4540                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4541         } else if (native_mode->clock == drm_mode->clock &&
4542                         native_mode->htotal == drm_mode->htotal &&
4543                         native_mode->vtotal == drm_mode->vtotal) {
4544                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4545         } else {
4546                 /* Neither scaled nor an amdgpu-inserted mode; no need to patch. */
4547         }
4548 }
4549
4550 static struct dc_sink *
4551 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4552 {
4553         struct dc_sink_init_data sink_init_data = { 0 };
4554         struct dc_sink *sink = NULL;
4555         sink_init_data.link = aconnector->dc_link;
4556         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4557
4558         sink = dc_sink_create(&sink_init_data);
4559         if (!sink) {
4560                 DRM_ERROR("Failed to create sink!\n");
4561                 return NULL;
4562         }
4563         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4564
4565         return sink;
4566 }
4567
4568 static void set_multisync_trigger_params(
4569                 struct dc_stream_state *stream)
4570 {
4571         if (stream->triggered_crtc_reset.enabled) {
4572                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4573                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4574         }
4575 }
4576
4577 static void set_master_stream(struct dc_stream_state *stream_set[],
4578                               int stream_count)
4579 {
4580         int j, highest_rfr = 0, master_stream = 0;
4581
4582         for (j = 0;  j < stream_count; j++) {
4583                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4584                         int refresh_rate = 0;
4585
4586                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4587                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4588                         if (refresh_rate > highest_rfr) {
4589                                 highest_rfr = refresh_rate;
4590                                 master_stream = j;
4591                         }
4592                 }
4593         }
4594         for (j = 0;  j < stream_count; j++) {
4595                 if (stream_set[j])
4596                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4597         }
4598 }
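
/*
 * Illustrative sketch, not part of the driver build: the refresh-rate
 * estimate used above. For 1080p60 timing,
 * 1485000 * 100 / (2200 * 1125) = 60 Hz; the enabled stream with the
 * highest such rate becomes the multisync master.
 */
#if 0 /* example only */
static int example_refresh_rate(const struct dc_stream_state *stream)
{
        return (stream->timing.pix_clk_100hz * 100) /
               (stream->timing.h_total * stream->timing.v_total);
}
#endif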
4599
4600 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4601 {
4602         int i = 0;
4603
4604         if (context->stream_count < 2)
4605                 return;
4606         for (i = 0; i < context->stream_count ; i++) {
4607                 if (!context->streams[i])
4608                         continue;
4609                 /*
4610                  * TODO: add a function to read AMD VSDB bits and set
4611                  * crtc_sync_master.multi_sync_enabled flag
4612                  * For now it's set to false
4613                  */
4614                 set_multisync_trigger_params(context->streams[i]);
4615         }
4616         set_master_stream(context->streams, context->stream_count);
4617 }
4618
4619 static struct dc_stream_state *
4620 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4621                        const struct drm_display_mode *drm_mode,
4622                        const struct dm_connector_state *dm_state,
4623                        const struct dc_stream_state *old_stream,
4624                        int requested_bpc)
4625 {
4626         struct drm_display_mode *preferred_mode = NULL;
4627         struct drm_connector *drm_connector;
4628         const struct drm_connector_state *con_state =
4629                 dm_state ? &dm_state->base : NULL;
4630         struct dc_stream_state *stream = NULL;
4631         struct drm_display_mode mode = *drm_mode;
4632         bool native_mode_found = false;
4633         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4634         int mode_refresh;
4635         int preferred_refresh = 0;
4636 #if defined(CONFIG_DRM_AMD_DC_DCN)
4637         struct dsc_dec_dpcd_caps dsc_caps;
4638 #endif
4639         uint32_t link_bandwidth_kbps;
4640
4641         struct dc_sink *sink = NULL;
4642         if (aconnector == NULL) {
4643                 DRM_ERROR("aconnector is NULL!\n");
4644                 return stream;
4645         }
4646
4647         drm_connector = &aconnector->base;
4648
4649         if (!aconnector->dc_sink) {
4650                 sink = create_fake_sink(aconnector);
4651                 if (!sink)
4652                         return stream;
4653         } else {
4654                 sink = aconnector->dc_sink;
4655                 dc_sink_retain(sink);
4656         }
4657
4658         stream = dc_create_stream_for_sink(sink);
4659
4660         if (stream == NULL) {
4661                 DRM_ERROR("Failed to create stream for sink!\n");
4662                 goto finish;
4663         }
4664
4665         stream->dm_stream_context = aconnector;
4666
4667         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4668                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4669
4670         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4671                 /* Search for preferred mode */
4672                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4673                         native_mode_found = true;
4674                         break;
4675                 }
4676         }
4677         if (!native_mode_found)
4678                 preferred_mode = list_first_entry_or_null(
4679                                 &aconnector->base.modes,
4680                                 struct drm_display_mode,
4681                                 head);
4682
4683         mode_refresh = drm_mode_vrefresh(&mode);
4684
4685         if (preferred_mode == NULL) {
4686                 /*
4687                  * This may not be an error; the use case is when we have no
4688                  * usermode calls to reset and set the mode upon hotplug. In this
4689                  * case, we call set mode ourselves to restore the previous mode,
4690                  * and the mode list may not be filled in in time.
4691                  */
4692                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4693         } else {
4694                 decide_crtc_timing_for_drm_display_mode(
4695                                 &mode, preferred_mode,
4696                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4697                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4698         }
4699
4700         if (!dm_state)
4701                 drm_mode_set_crtcinfo(&mode, 0);
4702
4703         /*
4704          * If scaling is enabled and the refresh rate didn't change,
4705          * we copy the vic and polarities of the old timings.
4706          */
4707         if (!scale || mode_refresh != preferred_refresh)
4708                 fill_stream_properties_from_drm_display_mode(stream,
4709                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4710         else
4711                 fill_stream_properties_from_drm_display_mode(stream,
4712                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4713
4714         stream->timing.flags.DSC = 0;
4715
4716         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4717 #if defined(CONFIG_DRM_AMD_DC_DCN)
4718                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4719                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4720                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4721                                       &dsc_caps);
4722 #endif
4723                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4724                                                              dc_link_get_link_cap(aconnector->dc_link));
4725
4726 #if defined(CONFIG_DRM_AMD_DC_DCN)
4727                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4728                         /* Set DSC policy according to dsc_clock_en */
4729                         dc_dsc_policy_set_enable_dsc_when_not_needed(
4730                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4731
4732                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4733                                                   &dsc_caps,
4734                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4735                                                   link_bandwidth_kbps,
4736                                                   &stream->timing,
4737                                                   &stream->timing.dsc_cfg))
4738                                 stream->timing.flags.DSC = 1;
4739                         /* Overwrite the stream flag if DSC is enabled through debugfs */
4740                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4741                                 stream->timing.flags.DSC = 1;
4742
4743                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4744                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4745
4746                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4747                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4748
4749                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4750                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4751                 }
4752 #endif
4753         }
4754
4755         update_stream_scaling_settings(&mode, dm_state, stream);
4756
4757         fill_audio_info(
4758                 &stream->audio_info,
4759                 drm_connector,
4760                 sink);
4761
4762         update_stream_signal(stream, sink);
4763
4764         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4765                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4766
4767         if (stream->link->psr_settings.psr_feature_enabled) {
4768                 /*
4769                  * Decide whether the stream supports VSC SDP colorimetry
4770                  * before building the VSC info packet.
4771                  */
4772                 stream->use_vsc_sdp_for_colorimetry = false;
4773                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4774                         stream->use_vsc_sdp_for_colorimetry =
4775                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4776                 } else {
4777                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4778                                 stream->use_vsc_sdp_for_colorimetry = true;
4779                 }
4780                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4781         }
4782 finish:
4783         dc_sink_release(sink);
4784
4785         return stream;
4786 }
4787
4788 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4789 {
4790         drm_crtc_cleanup(crtc);
4791         kfree(crtc);
4792 }
4793
4794 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4795                                   struct drm_crtc_state *state)
4796 {
4797         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4798
4799         /* TODO: Destroy dc_stream objects when the stream object is flattened */
4800         if (cur->stream)
4801                 dc_stream_release(cur->stream);
4802
4804         __drm_atomic_helper_crtc_destroy_state(state);
4805
4807         kfree(state);
4808 }
4809
4810 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4811 {
4812         struct dm_crtc_state *state;
4813
4814         if (crtc->state)
4815                 dm_crtc_destroy_state(crtc, crtc->state);
4816
4817         state = kzalloc(sizeof(*state), GFP_KERNEL);
4818         if (WARN_ON(!state))
4819                 return;
4820
4821         __drm_atomic_helper_crtc_reset(crtc, &state->base);
4822 }
4823
4824 static struct drm_crtc_state *
4825 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4826 {
4827         struct dm_crtc_state *state, *cur;
4828
4829         if (WARN_ON(!crtc->state))
4830                 return NULL;
4831
4832         cur = to_dm_crtc_state(crtc->state);
4833
4834         state = kzalloc(sizeof(*state), GFP_KERNEL);
4835         if (!state)
4836                 return NULL;
4837
4838         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4839
4840         if (cur->stream) {
4841                 state->stream = cur->stream;
4842                 dc_stream_retain(state->stream);
4843         }
4844
4845         state->active_planes = cur->active_planes;
4846         state->vrr_infopacket = cur->vrr_infopacket;
4847         state->abm_level = cur->abm_level;
4848         state->vrr_supported = cur->vrr_supported;
4849         state->freesync_config = cur->freesync_config;
4850         state->crc_src = cur->crc_src;
4851         state->cm_has_degamma = cur->cm_has_degamma;
4852         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4853
4854         /* TODO: Duplicate dc_stream after the stream object is flattened */
4855
4856         return &state->base;
4857 }
4858
4859 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4860 {
4861         enum dc_irq_source irq_source;
4862         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4863         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4864         int rc;
4865
4866         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4867
4868         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4869
4870         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4871                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4872         return rc;
4873 }
4874
4875 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4876 {
4877         enum dc_irq_source irq_source;
4878         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4879         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4880         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4881         int rc = 0;
4882
4883         if (enable) {
4884                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4885                 if (amdgpu_dm_vrr_active(acrtc_state))
4886                         rc = dm_set_vupdate_irq(crtc, true);
4887         } else {
4888                 /* vblank irq off -> vupdate irq off */
4889                 rc = dm_set_vupdate_irq(crtc, false);
4890         }
4891
4892         if (rc)
4893                 return rc;
4894
4895         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4896         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4897 }
4898
4899 static int dm_enable_vblank(struct drm_crtc *crtc)
4900 {
4901         return dm_set_vblank(crtc, true);
4902 }
4903
4904 static void dm_disable_vblank(struct drm_crtc *crtc)
4905 {
4906         dm_set_vblank(crtc, false);
4907 }
4908
4909 /* Only the options currently available to the driver are implemented */
4910 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4911         .reset = dm_crtc_reset_state,
4912         .destroy = amdgpu_dm_crtc_destroy,
4913         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4914         .set_config = drm_atomic_helper_set_config,
4915         .page_flip = drm_atomic_helper_page_flip,
4916         .atomic_duplicate_state = dm_crtc_duplicate_state,
4917         .atomic_destroy_state = dm_crtc_destroy_state,
4918         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4919         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4920         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4921         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4922         .enable_vblank = dm_enable_vblank,
4923         .disable_vblank = dm_disable_vblank,
4924         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4925 };
4926
4927 static enum drm_connector_status
4928 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4929 {
4930         bool connected;
4931         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4932
4933         /*
4934          * Notes:
4935          * 1. This interface is NOT called in context of HPD irq.
4936          * 2. This interface *is called* in the context of a user-mode ioctl,
4937          * which makes it a bad place for *any* MST-related activity.
4938          */
4939
4940         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4941             !aconnector->fake_enable)
4942                 connected = (aconnector->dc_sink != NULL);
4943         else
4944                 connected = (aconnector->base.force == DRM_FORCE_ON);
4945
4946         update_subconnector_property(aconnector);
4947
4948         return (connected ? connector_status_connected :
4949                         connector_status_disconnected);
4950 }
4951
4952 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4953                                             struct drm_connector_state *connector_state,
4954                                             struct drm_property *property,
4955                                             uint64_t val)
4956 {
4957         struct drm_device *dev = connector->dev;
4958         struct amdgpu_device *adev = drm_to_adev(dev);
4959         struct dm_connector_state *dm_old_state =
4960                 to_dm_connector_state(connector->state);
4961         struct dm_connector_state *dm_new_state =
4962                 to_dm_connector_state(connector_state);
4963
4964         int ret = -EINVAL;
4965
4966         if (property == dev->mode_config.scaling_mode_property) {
4967                 enum amdgpu_rmx_type rmx_type;
4968
4969                 switch (val) {
4970                 case DRM_MODE_SCALE_CENTER:
4971                         rmx_type = RMX_CENTER;
4972                         break;
4973                 case DRM_MODE_SCALE_ASPECT:
4974                         rmx_type = RMX_ASPECT;
4975                         break;
4976                 case DRM_MODE_SCALE_FULLSCREEN:
4977                         rmx_type = RMX_FULL;
4978                         break;
4979                 case DRM_MODE_SCALE_NONE:
4980                 default:
4981                         rmx_type = RMX_OFF;
4982                         break;
4983                 }
4984
4985                 if (dm_old_state->scaling == rmx_type)
4986                         return 0;
4987
4988                 dm_new_state->scaling = rmx_type;
4989                 ret = 0;
4990         } else if (property == adev->mode_info.underscan_hborder_property) {
4991                 dm_new_state->underscan_hborder = val;
4992                 ret = 0;
4993         } else if (property == adev->mode_info.underscan_vborder_property) {
4994                 dm_new_state->underscan_vborder = val;
4995                 ret = 0;
4996         } else if (property == adev->mode_info.underscan_property) {
4997                 dm_new_state->underscan_enable = val;
4998                 ret = 0;
4999         } else if (property == adev->mode_info.abm_level_property) {
5000                 dm_new_state->abm_level = val;
5001                 ret = 0;
5002         }
5003
5004         return ret;
5005 }
5006
5007 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5008                                             const struct drm_connector_state *state,
5009                                             struct drm_property *property,
5010                                             uint64_t *val)
5011 {
5012         struct drm_device *dev = connector->dev;
5013         struct amdgpu_device *adev = drm_to_adev(dev);
5014         struct dm_connector_state *dm_state =
5015                 to_dm_connector_state(state);
5016         int ret = -EINVAL;
5017
5018         if (property == dev->mode_config.scaling_mode_property) {
5019                 switch (dm_state->scaling) {
5020                 case RMX_CENTER:
5021                         *val = DRM_MODE_SCALE_CENTER;
5022                         break;
5023                 case RMX_ASPECT:
5024                         *val = DRM_MODE_SCALE_ASPECT;
5025                         break;
5026                 case RMX_FULL:
5027                         *val = DRM_MODE_SCALE_FULLSCREEN;
5028                         break;
5029                 case RMX_OFF:
5030                 default:
5031                         *val = DRM_MODE_SCALE_NONE;
5032                         break;
5033                 }
5034                 ret = 0;
5035         } else if (property == adev->mode_info.underscan_hborder_property) {
5036                 *val = dm_state->underscan_hborder;
5037                 ret = 0;
5038         } else if (property == adev->mode_info.underscan_vborder_property) {
5039                 *val = dm_state->underscan_vborder;
5040                 ret = 0;
5041         } else if (property == adev->mode_info.underscan_property) {
5042                 *val = dm_state->underscan_enable;
5043                 ret = 0;
5044         } else if (property == adev->mode_info.abm_level_property) {
5045                 *val = dm_state->abm_level;
5046                 ret = 0;
5047         }
5048
5049         return ret;
5050 }
5051
5052 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5053 {
5054         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5055
5056         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5057 }
5058
5059 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5060 {
5061         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5062         const struct dc_link *link = aconnector->dc_link;
5063         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5064         struct amdgpu_display_manager *dm = &adev->dm;
5065
5066         drm_atomic_private_obj_fini(&aconnector->mst_mgr.base);
5067 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5068         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5069
5070         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5071             link->type != dc_connection_none &&
5072             dm->backlight_dev) {
5073                 backlight_device_unregister(dm->backlight_dev);
5074                 dm->backlight_dev = NULL;
5075         }
5076 #endif
5077
5078         if (aconnector->dc_em_sink)
5079                 dc_sink_release(aconnector->dc_em_sink);
5080         aconnector->dc_em_sink = NULL;
5081         if (aconnector->dc_sink)
5082                 dc_sink_release(aconnector->dc_sink);
5083         aconnector->dc_sink = NULL;
5084
5085         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5086         drm_connector_unregister(connector);
5087         drm_connector_cleanup(connector);
5088         if (aconnector->i2c) {
5089                 i2c_del_adapter(&aconnector->i2c->base);
5090                 kfree(aconnector->i2c);
5091         }
5092         kfree(aconnector->dm_dp_aux.aux.name);
5093
5094         kfree(connector);
5095 }
5096
5097 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5098 {
5099         struct dm_connector_state *state =
5100                 to_dm_connector_state(connector->state);
5101
5102         if (connector->state)
5103                 __drm_atomic_helper_connector_destroy_state(connector->state);
5104
5105         kfree(state);
5106
5107         state = kzalloc(sizeof(*state), GFP_KERNEL);
5108
5109         if (state) {
5110                 state->scaling = RMX_OFF;
5111                 state->underscan_enable = false;
5112                 state->underscan_hborder = 0;
5113                 state->underscan_vborder = 0;
5114                 state->base.max_requested_bpc = 8;
5115                 state->vcpi_slots = 0;
5116                 state->pbn = 0;
5117                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5118                         state->abm_level = amdgpu_dm_abm_level;
5119
5120                 __drm_atomic_helper_connector_reset(connector, &state->base);
5121         }
5122 }
5123
5124 struct drm_connector_state *
5125 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5126 {
5127         struct dm_connector_state *state =
5128                 to_dm_connector_state(connector->state);
5129
5130         struct dm_connector_state *new_state =
5131                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5132
5133         if (!new_state)
5134                 return NULL;
5135
5136         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5137
5138         new_state->freesync_capable = state->freesync_capable;
5139         new_state->abm_level = state->abm_level;
5140         new_state->scaling = state->scaling;
5141         new_state->underscan_enable = state->underscan_enable;
5142         new_state->underscan_hborder = state->underscan_hborder;
5143         new_state->underscan_vborder = state->underscan_vborder;
5144         new_state->vcpi_slots = state->vcpi_slots;
5145         new_state->pbn = state->pbn;
5146         return &new_state->base;
5147 }
5148
5149 static int
5150 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5151 {
5152         struct amdgpu_dm_connector *amdgpu_dm_connector =
5153                 to_amdgpu_dm_connector(connector);
5154         int r;
5155
5156         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5157             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5158                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5159                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5160                 if (r)
5161                         return r;
5162         }
5163
5164 #if defined(CONFIG_DEBUG_FS)
5165         connector_debugfs_init(amdgpu_dm_connector);
5166 #endif
5167
5168         return 0;
5169 }
5170
5171 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5172         .reset = amdgpu_dm_connector_funcs_reset,
5173         .detect = amdgpu_dm_connector_detect,
5174         .fill_modes = drm_helper_probe_single_connector_modes,
5175         .destroy = amdgpu_dm_connector_destroy,
5176         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5177         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5178         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5179         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5180         .late_register = amdgpu_dm_connector_late_register,
5181         .early_unregister = amdgpu_dm_connector_unregister
5182 };
5183
5184 static int get_modes(struct drm_connector *connector)
5185 {
5186         return amdgpu_dm_connector_get_modes(connector);
5187 }
5188
5189 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5190 {
5191         struct dc_sink_init_data init_params = {
5192                         .link = aconnector->dc_link,
5193                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5194         };
5195         struct edid *edid;
5196
5197         if (!aconnector->base.edid_blob_ptr) {
5198                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5199                                 aconnector->base.name);
5200
5201                 aconnector->base.force = DRM_FORCE_OFF;
5202                 aconnector->base.override_edid = false;
5203                 return;
5204         }
5205
5206         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5207
5208         aconnector->edid = edid;
5209
5210         aconnector->dc_em_sink = dc_link_add_remote_sink(
5211                 aconnector->dc_link,
5212                 (uint8_t *)edid,
5213                 (edid->extensions + 1) * EDID_LENGTH,
5214                 &init_params);
5215
5216         if (aconnector->base.force == DRM_FORCE_ON) {
5217                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5218                 aconnector->dc_link->local_sink :
5219                 aconnector->dc_em_sink;
5220                 dc_sink_retain(aconnector->dc_sink);
5221         }
5222 }
5223
5224 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5225 {
5226         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5227
5228         /*
5229          * In case of a headless boot with force-on for a DP managed connector,
5230          * those settings have to be != 0 to get an initial modeset.
5231          */
5232         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5233                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5234                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5235         }
5236
5238         aconnector->base.override_edid = true;
5239         create_eml_sink(aconnector);
5240 }
5241
5242 static struct dc_stream_state *
5243 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5244                                 const struct drm_display_mode *drm_mode,
5245                                 const struct dm_connector_state *dm_state,
5246                                 const struct dc_stream_state *old_stream)
5247 {
5248         struct drm_connector *connector = &aconnector->base;
5249         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5250         struct dc_stream_state *stream;
5251         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5252         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5253         enum dc_status dc_result = DC_OK;
5254
5255         do {
5256                 stream = create_stream_for_sink(aconnector, drm_mode,
5257                                                 dm_state, old_stream,
5258                                                 requested_bpc);
5259                 if (stream == NULL) {
5260                         DRM_ERROR("Failed to create stream for sink!\n");
5261                         break;
5262                 }
5263
5264                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5265
5266                 if (dc_result != DC_OK) {
5267                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5268                                       drm_mode->hdisplay,
5269                                       drm_mode->vdisplay,
5270                                       drm_mode->clock,
5271                                       dc_result,
5272                                       dc_status_to_str(dc_result));
5273
5274                         dc_stream_release(stream);
5275                         stream = NULL;
5276                         requested_bpc -= 2; /* lower bpc to retry validation */
5277                 }
5278
5279         } while (stream == NULL && requested_bpc >= 6);
5280
5281         return stream;
5282 }
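
/*
 * Illustrative sketch, not part of the driver build: the loop above lowers
 * the requested bpc by 2 on each failed validation, so with
 * max_requested_bpc = 10 the attempted sequence is 10 -> 8 -> 6, stopping
 * at the first bpc DC accepts and giving up below 6.
 */
#if 0 /* example only */
static void example_bpc_fallback(void)
{
        int requested_bpc;

        for (requested_bpc = 10; requested_bpc >= 6; requested_bpc -= 2) {
                /* create_stream_for_sink() + dc_validate_stream(); stop on DC_OK */
        }
}
#endif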
5283
5284 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5285                                    struct drm_display_mode *mode)
5286 {
5287         int result = MODE_ERROR;
5288         struct dc_sink *dc_sink;
5289         /* TODO: Unhardcode stream count */
5290         struct dc_stream_state *stream;
5291         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5292
5293         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5294                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5295                 return result;
5296
5297         /*
5298          * Only run this the first time mode_valid is called to initialize
5299          * EDID mgmt
5300          */
5301         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5302                 !aconnector->dc_em_sink)
5303                 handle_edid_mgmt(aconnector);
5304
5305         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5306
5307         if (dc_sink == NULL) {
5308                 DRM_ERROR("dc_sink is NULL!\n");
5309                 goto fail;
5310         }
5311
5312         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5313         if (stream) {
5314                 dc_stream_release(stream);
5315                 result = MODE_OK;
5316         }
5317
5318 fail:
5319         /* TODO: error handling */
5320         return result;
5321 }
5322
5323 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5324                                 struct dc_info_packet *out)
5325 {
5326         struct hdmi_drm_infoframe frame;
5327         unsigned char buf[30]; /* 26 + 4 */
5328         ssize_t len;
5329         int ret, i;
5330
5331         memset(out, 0, sizeof(*out));
5332
5333         if (!state->hdr_output_metadata)
5334                 return 0;
5335
5336         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5337         if (ret)
5338                 return ret;
5339
5340         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5341         if (len < 0)
5342                 return (int)len;
5343
5344         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5345         if (len != 30)
5346                 return -EINVAL;
5347
5348         /* Prepare the infopacket for DC. */
5349         switch (state->connector->connector_type) {
5350         case DRM_MODE_CONNECTOR_HDMIA:
5351                 out->hb0 = 0x87; /* type */
5352                 out->hb1 = 0x01; /* version */
5353                 out->hb2 = 0x1A; /* length */
5354                 out->sb[0] = buf[3]; /* checksum */
5355                 i = 1;
5356                 break;
5357
5358         case DRM_MODE_CONNECTOR_DisplayPort:
5359         case DRM_MODE_CONNECTOR_eDP:
5360                 out->hb0 = 0x00; /* sdp id, zero */
5361                 out->hb1 = 0x87; /* type */
5362                 out->hb2 = 0x1D; /* payload len - 1 */
5363                 out->hb3 = (0x13 << 2); /* sdp version */
5364                 out->sb[0] = 0x01; /* version */
5365                 out->sb[1] = 0x1A; /* length */
5366                 i = 2;
5367                 break;
5368
5369         default:
5370                 return -EINVAL;
5371         }
5372
5373         memcpy(&out->sb[i], &buf[4], 26);
5374         out->valid = true;
5375
5376         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5377                        sizeof(out->sb), false);
5378
5379         return 0;
5380 }
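
/*
 * Illustrative sketch, not part of the driver build: the 30 packed bytes
 * consumed above are a 4-byte infoframe header (type 0x87, version 0x01,
 * length 0x1A, checksum) followed by the fixed 26-byte HDR static metadata
 * payload. For HDMI the checksum lands in sb[0]; for DP the same 26 bytes
 * are carried in an SDP with its own 4-byte header.
 */
#if 0 /* example only */
static void example_hdr_hdmi_layout(const unsigned char buf[30],
                                    struct dc_info_packet *out)
{
        out->hb0 = buf[0];   /* 0x87: Dynamic Range and Mastering infoframe */
        out->hb1 = buf[1];   /* 0x01: version                               */
        out->hb2 = buf[2];   /* 0x1A: payload length (26)                   */
        out->sb[0] = buf[3]; /* checksum                                    */
        memcpy(&out->sb[1], &buf[4], 26); /* static metadata payload        */
}
#endif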
5381
5382 static bool
5383 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5384                           const struct drm_connector_state *new_state)
5385 {
5386         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5387         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5388
5389         if (old_blob != new_blob) {
5390                 if (old_blob && new_blob &&
5391                     old_blob->length == new_blob->length)
5392                         return memcmp(old_blob->data, new_blob->data,
5393                                       old_blob->length);
5394
5395                 return true;
5396         }
5397
5398         return false;
5399 }
5400
5401 static int
5402 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5403                                  struct drm_atomic_state *state)
5404 {
5405         struct drm_connector_state *new_con_state =
5406                 drm_atomic_get_new_connector_state(state, conn);
5407         struct drm_connector_state *old_con_state =
5408                 drm_atomic_get_old_connector_state(state, conn);
5409         struct drm_crtc *crtc = new_con_state->crtc;
5410         struct drm_crtc_state *new_crtc_state;
5411         int ret;
5412
5413         if (!crtc)
5414                 return 0;
5415
5416         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5417                 struct dc_info_packet hdr_infopacket;
5418
5419                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5420                 if (ret)
5421                         return ret;
5422
5423                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5424                 if (IS_ERR(new_crtc_state))
5425                         return PTR_ERR(new_crtc_state);
5426
5427                 /*
5428                  * DC considers the stream backends changed if the
5429                  * static metadata changes. Forcing the modeset also
5430                  * gives a simple way for userspace to switch from
5431                  * 8bpc to 10bpc when setting the metadata to enter
5432                  * or exit HDR.
5433                  *
5434                  * Changing the static metadata after it's been
5435                  * set is permissible, however. So only force a
5436                  * modeset if we're entering or exiting HDR.
5437                  */
5438                 new_crtc_state->mode_changed =
5439                         !old_con_state->hdr_output_metadata ||
5440                         !new_con_state->hdr_output_metadata;
5441         }
5442
5443         return 0;
5444 }
5445
5446 static const struct drm_connector_helper_funcs
5447 amdgpu_dm_connector_helper_funcs = {
5448         /*
5449          * If hotplugging a second, bigger display in FB console mode, the
5450          * bigger resolution modes will be filtered out by drm_mode_validate_size()
5451          * and will be missing after the user starts lightdm. So we need to
5452          * renew the mode list in the get_modes callback, not just return the
5453          * mode count.
5454         .get_modes = get_modes,
5455         .mode_valid = amdgpu_dm_connector_mode_valid,
5456         .atomic_check = amdgpu_dm_connector_atomic_check,
5457 };
5458
5459 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5460 {
5461 }
5462
5463 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5464 {
5465         struct drm_atomic_state *state = new_crtc_state->state;
5466         struct drm_plane *plane;
5467         int num_active = 0;
5468
5469         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5470                 struct drm_plane_state *new_plane_state;
5471
5472                 /* Cursor planes are "fake". */
5473                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5474                         continue;
5475
5476                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5477
5478                 if (!new_plane_state) {
5479                         /*
5480                          * The plane is enabled on the CRTC and hasn't changed
5481                          * state. This means that it previously passed
5482                          * validation and is therefore enabled.
5483                          */
5484                         num_active += 1;
5485                         continue;
5486                 }
5487
5488                 /* We need a framebuffer to be considered enabled. */
5489                 num_active += (new_plane_state->fb != NULL);
5490         }
5491
5492         return num_active;
5493 }
5494
5495 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5496                                          struct drm_crtc_state *new_crtc_state)
5497 {
5498         struct dm_crtc_state *dm_new_crtc_state =
5499                 to_dm_crtc_state(new_crtc_state);
5500
5501         dm_new_crtc_state->active_planes = 0;
5502
5503         if (!dm_new_crtc_state->stream)
5504                 return;
5505
5506         dm_new_crtc_state->active_planes =
5507                 count_crtc_active_planes(new_crtc_state);
5508 }
5509
5510 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5511                                        struct drm_crtc_state *state)
5512 {
5513         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5514         struct dc *dc = adev->dm.dc;
5515         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5516         int ret = -EINVAL;
5517
5518         dm_update_crtc_active_planes(crtc, state);
5519
5520         if (unlikely(!dm_crtc_state->stream &&
5521                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5522                 WARN_ON(1);
5523                 return ret;
5524         }
5525
5526         /*
5527          * We require the primary plane to be enabled whenever the CRTC is, otherwise
5528          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5529          * planes are disabled, which is not supported by the hardware. And there is legacy
5530          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5531          */
5532         if (state->enable &&
5533             !(state->plane_mask & drm_plane_mask(crtc->primary)))
5534                 return -EINVAL;
5535
5536         /* In some use cases, like reset, no stream is attached */
5537         if (!dm_crtc_state->stream)
5538                 return 0;
5539
5540         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5541                 return 0;
5542
5543         return ret;
5544 }
5545
5546 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5547                                       const struct drm_display_mode *mode,
5548                                       struct drm_display_mode *adjusted_mode)
5549 {
5550         return true;
5551 }
5552
5553 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5554         .disable = dm_crtc_helper_disable,
5555         .atomic_check = dm_crtc_helper_atomic_check,
5556         .mode_fixup = dm_crtc_helper_mode_fixup,
5557         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5558 };
5559
5560 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5561 {
5562
5563 }
5564
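     /*
      * Note: despite the name, this returns bits per colour component;
      * callers multiply by 3 to get bits per pixel for RGB streams.
      */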
5565 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5566 {
5567         switch (display_color_depth) {
5568         case COLOR_DEPTH_666:
5569                 return 6;
5570         case COLOR_DEPTH_888:
5571                 return 8;
5572         case COLOR_DEPTH_101010:
5573                 return 10;
5574         case COLOR_DEPTH_121212:
5575                 return 12;
5576         case COLOR_DEPTH_141414:
5577                 return 14;
5578         case COLOR_DEPTH_161616:
5579                 return 16;
5580         default:
5581                 break;
5582         }
5583         return 0;
5584 }
5585
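     /*
      * MST streams must reserve VCPI time slots sized by their Payload
      * Bandwidth Number. drm_dp_calc_pbn_mode() derives the PBN roughly as
      *
      *     PBN = ceil(clock_kHz * bpp * 64 * 1.006 / (8 * 54 * 10^6))
      *
      * i.e. the peak byte rate expressed in 54/64 MB/s units plus the 0.6%
      * margin required by the DP spec. The atomic check below computes bpp
      * from the negotiated colour depth and asks the MST manager for a
      * matching slot allocation.
      */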
5586 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5587                                           struct drm_crtc_state *crtc_state,
5588                                           struct drm_connector_state *conn_state)
5589 {
5590         struct drm_atomic_state *state = crtc_state->state;
5591         struct drm_connector *connector = conn_state->connector;
5592         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5593         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5594         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5595         struct drm_dp_mst_topology_mgr *mst_mgr;
5596         struct drm_dp_mst_port *mst_port;
5597         enum dc_color_depth color_depth;
5598         int clock, bpp = 0;
5599         bool is_y420 = false;
5600
5601         if (!aconnector->port || !aconnector->dc_sink)
5602                 return 0;
5603
5604         mst_port = aconnector->port;
5605         mst_mgr = &aconnector->mst_port->mst_mgr;
5606
5607         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5608                 return 0;
5609
5610         if (!state->duplicated) {
5611                 int max_bpc = conn_state->max_requested_bpc;
5612                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5613                                 aconnector->force_yuv420_output;
5614                 color_depth = convert_color_depth_from_display_info(connector,
5615                                                                     is_y420,
5616                                                                     max_bpc);
5617                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5618                 clock = adjusted_mode->clock;
5619                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5620         }
5621         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5622                                                                            mst_mgr,
5623                                                                            mst_port,
5624                                                                            dm_new_connector_state->pbn,
5625                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
5626         if (dm_new_connector_state->vcpi_slots < 0) {
5627                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5628                 return dm_new_connector_state->vcpi_slots;
5629         }
5630         return 0;
5631 }
5632
5633 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5634         .disable = dm_encoder_helper_disable,
5635         .atomic_check = dm_encoder_helper_atomic_check
5636 };
5637
5638 #if defined(CONFIG_DRM_AMD_DC_DCN)
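     /*
      * With DSC enabled, the VCPI allocation made at encoder atomic-check
      * time is stale: the stream now consumes link bandwidth at the
      * compressed rate. Recompute the PBN from dsc_cfg.bits_per_pixel
      * (stored in 1/16 bpp units, hence the 'dsc' flag passed to
      * drm_dp_calc_pbn_mode()) and re-request slots from the MST manager.
      */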
5639 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5640                                             struct dc_state *dc_state)
5641 {
5642         struct dc_stream_state *stream = NULL;
5643         struct drm_connector *connector;
5644         struct drm_connector_state *new_con_state, *old_con_state;
5645         struct amdgpu_dm_connector *aconnector;
5646         struct dm_connector_state *dm_conn_state;
5647         int i, j, clock, bpp;
5648         int vcpi, pbn_div, pbn = 0;
5649
5650         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5651
5652                 aconnector = to_amdgpu_dm_connector(connector);
5653
5654                 if (!aconnector->port)
5655                         continue;
5656
5657                 if (!new_con_state || !new_con_state->crtc)
5658                         continue;
5659
5660                 dm_conn_state = to_dm_connector_state(new_con_state);
5661
5662                 for (j = 0; j < dc_state->stream_count; j++) {
5663                         stream = dc_state->streams[j];
5664                         if (!stream)
5665                                 continue;
5666
5667                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5668                                 break;
5669
5670                         stream = NULL;
5671                 }
5672
5673                 if (!stream)
5674                         continue;
5675
5676                 if (stream->timing.flags.DSC != 1) {
5677                         drm_dp_mst_atomic_enable_dsc(state,
5678                                                      aconnector->port,
5679                                                      dm_conn_state->pbn,
5680                                                      0,
5681                                                      false);
5682                         continue;
5683                 }
5684
5685                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5686                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5687                 clock = stream->timing.pix_clk_100hz / 10;
5688                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5689                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5690                                                     aconnector->port,
5691                                                     pbn, pbn_div,
5692                                                     true);
5693                 if (vcpi < 0)
5694                         return vcpi;
5695
5696                 dm_conn_state->pbn = pbn;
5697                 dm_conn_state->vcpi_slots = vcpi;
5698         }
5699         return 0;
5700 }
5701 #endif
5702
5703 static void dm_drm_plane_reset(struct drm_plane *plane)
5704 {
5705         struct dm_plane_state *amdgpu_state = NULL;
5706
5707         if (plane->state)
5708                 plane->funcs->atomic_destroy_state(plane, plane->state);
5709
5710         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5711         WARN_ON(amdgpu_state == NULL);
5712
5713         if (amdgpu_state)
5714                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5715 }
5716
5717 static struct drm_plane_state *
5718 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5719 {
5720         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5721
5722         old_dm_plane_state = to_dm_plane_state(plane->state);
5723         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5724         if (!dm_plane_state)
5725                 return NULL;
5726
5727         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5728
5729         if (old_dm_plane_state->dc_state) {
5730                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5731                 dc_plane_state_retain(dm_plane_state->dc_state);
5732         }
5733
5734         /* Framebuffer hasn't been updated yet, so retain old flags. */
5735         dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5736         dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5737
5738         return &dm_plane_state->base;
5739 }
5740
5741 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5742                                 struct drm_plane_state *state)
5743 {
5744         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5745
5746         if (dm_plane_state->dc_state)
5747                 dc_plane_state_release(dm_plane_state->dc_state);
5748
5749         drm_atomic_helper_plane_destroy_state(plane, state);
5750 }
5751
5752 static const struct drm_plane_funcs dm_plane_funcs = {
5753         .update_plane   = drm_atomic_helper_update_plane,
5754         .disable_plane  = drm_atomic_helper_disable_plane,
5755         .destroy        = drm_primary_helper_destroy,
5756         .reset = dm_drm_plane_reset,
5757         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5758         .atomic_destroy_state = dm_drm_plane_destroy_state,
5759 };
5760
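     /*
      * Pin the framebuffer BO so that its GPU address stays valid while it is
      * being scanned out: reserve it through TTM, pin it into a scanout-capable
      * domain (VRAM for cursors, any supported domain otherwise), bind it to
      * the GART and record the resulting address in the amdgpu_framebuffer.
      */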
5761 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5762                                       struct drm_plane_state *new_state)
5763 {
5764         struct amdgpu_framebuffer *afb;
5765         struct drm_gem_object *obj;
5766         struct amdgpu_device *adev;
5767         struct amdgpu_bo *rbo;
5768         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5769         struct list_head list;
5770         struct ttm_validate_buffer tv;
5771         struct ww_acquire_ctx ticket;
5772         uint32_t domain;
5773         int r;
5774
5775         if (!new_state->fb) {
5776                 DRM_DEBUG_DRIVER("No FB bound\n");
5777                 return 0;
5778         }
5779
5780         afb = to_amdgpu_framebuffer(new_state->fb);
5781         obj = new_state->fb->obj[0];
5782         rbo = gem_to_amdgpu_bo(obj);
5783         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5784         INIT_LIST_HEAD(&list);
5785
5786         tv.bo = &rbo->tbo;
5787         tv.num_shared = 1;
5788         list_add(&tv.head, &list);
5789
5790         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5791         if (r) {
5792                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5793                 return r;
5794         }
5795
5796         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5797                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5798         else
5799                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5800
5801         r = amdgpu_bo_pin(rbo, domain);
5802         if (unlikely(r != 0)) {
5803                 if (r != -ERESTARTSYS)
5804                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5805                 ttm_eu_backoff_reservation(&ticket, &list);
5806                 return r;
5807         }
5808
5809         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5810         if (unlikely(r != 0)) {
5811                 amdgpu_bo_unpin(rbo);
5812                 ttm_eu_backoff_reservation(&ticket, &list);
5813                 DRM_ERROR("%p bind failed\n", rbo);
5814                 return r;
5815         }
5816
5817         ttm_eu_backoff_reservation(&ticket, &list);
5818
5819         afb->address = amdgpu_bo_gpu_offset(rbo);
5820
5821         amdgpu_bo_ref(rbo);
5822
5823         /*
5824          * We don't do surface updates on planes that have been newly created,
5825          * but we also don't have the afb->address during atomic check.
5826          *
5827          * Fill in buffer attributes depending on the address here, but only on
5828          * newly created planes since they're not being used by DC yet and this
5829          * won't modify global state.
5830          */
5831         dm_plane_state_old = to_dm_plane_state(plane->state);
5832         dm_plane_state_new = to_dm_plane_state(new_state);
5833
5834         if (dm_plane_state_new->dc_state &&
5835             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5836                 struct dc_plane_state *plane_state =
5837                         dm_plane_state_new->dc_state;
5838                 bool force_disable_dcc = !plane_state->dcc.enable;
5839
5840                 fill_plane_buffer_attributes(
5841                         adev, afb, plane_state->format, plane_state->rotation,
5842                         dm_plane_state_new->tiling_flags,
5843                         &plane_state->tiling_info, &plane_state->plane_size,
5844                         &plane_state->dcc, &plane_state->address,
5845                         dm_plane_state_new->tmz_surface, force_disable_dcc);
5846         }
5847
5848         return 0;
5849 }
5850
5851 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5852                                        struct drm_plane_state *old_state)
5853 {
5854         struct amdgpu_bo *rbo;
5855         int r;
5856
5857         if (!old_state->fb)
5858                 return;
5859
5860         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5861         r = amdgpu_bo_reserve(rbo, false);
5862         if (unlikely(r)) {
5863                 DRM_ERROR("failed to reserve rbo before unpin\n");
5864                 return;
5865         }
5866
5867         amdgpu_bo_unpin(rbo);
5868         amdgpu_bo_unreserve(rbo);
5869         amdgpu_bo_unref(&rbo);
5870 }
5871
5872 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5873                                        struct drm_crtc_state *new_crtc_state)
5874 {
5875         int max_downscale = 0;
5876         int max_upscale = INT_MAX;
5877
5878         /* TODO: These should be checked against DC plane caps */
5879         return drm_atomic_helper_check_plane_state(
5880                 state, new_crtc_state, max_downscale, max_upscale, true, true);
5881 }
5882
5883 static int dm_plane_atomic_check(struct drm_plane *plane,
5884                                  struct drm_plane_state *state)
5885 {
5886         struct amdgpu_device *adev = drm_to_adev(plane->dev);
5887         struct dc *dc = adev->dm.dc;
5888         struct dm_plane_state *dm_plane_state;
5889         struct dc_scaling_info scaling_info;
5890         struct drm_crtc_state *new_crtc_state;
5891         int ret;
5892
5893         dm_plane_state = to_dm_plane_state(state);
5894
5895         if (!dm_plane_state->dc_state)
5896                 return 0;
5897
5898         new_crtc_state =
5899                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5900         if (!new_crtc_state)
5901                 return -EINVAL;
5902
5903         ret = dm_plane_helper_check_state(state, new_crtc_state);
5904         if (ret)
5905                 return ret;
5906
5907         ret = fill_dc_scaling_info(state, &scaling_info);
5908         if (ret)
5909                 return ret;
5910
5911         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5912                 return 0;
5913
5914         return -EINVAL;
5915 }
5916
5917 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5918                                        struct drm_plane_state *new_plane_state)
5919 {
5920         /* Only support async updates on cursor planes. */
5921         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5922                 return -EINVAL;
5923
5924         return 0;
5925 }
5926
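     /*
      * Async plane update: copy the new position and size into the current
      * plane state and program the cursor immediately, without a full atomic
      * commit. Only cursor planes can get here, as enforced by
      * dm_plane_atomic_async_check() above.
      */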
5927 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5928                                          struct drm_plane_state *new_state)
5929 {
5930         struct drm_plane_state *old_state =
5931                 drm_atomic_get_old_plane_state(new_state->state, plane);
5932
5933         swap(plane->state->fb, new_state->fb);
5934
5935         plane->state->src_x = new_state->src_x;
5936         plane->state->src_y = new_state->src_y;
5937         plane->state->src_w = new_state->src_w;
5938         plane->state->src_h = new_state->src_h;
5939         plane->state->crtc_x = new_state->crtc_x;
5940         plane->state->crtc_y = new_state->crtc_y;
5941         plane->state->crtc_w = new_state->crtc_w;
5942         plane->state->crtc_h = new_state->crtc_h;
5943
5944         handle_cursor_update(plane, old_state);
5945 }
5946
5947 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5948         .prepare_fb = dm_plane_helper_prepare_fb,
5949         .cleanup_fb = dm_plane_helper_cleanup_fb,
5950         .atomic_check = dm_plane_atomic_check,
5951         .atomic_async_check = dm_plane_atomic_async_check,
5952         .atomic_async_update = dm_plane_atomic_async_update
5953 };
5954
5955 /*
5956  * TODO: these are currently initialized to RGB formats only.
5957  * For future use cases we should either initialize them dynamically based on
5958  * plane capabilities, or initialize this array to all formats, so the internal
5959  * DRM check will succeed, and let DC implement the proper check
5960  */
5961 static const uint32_t rgb_formats[] = {
5962         DRM_FORMAT_XRGB8888,
5963         DRM_FORMAT_ARGB8888,
5964         DRM_FORMAT_RGBA8888,
5965         DRM_FORMAT_XRGB2101010,
5966         DRM_FORMAT_XBGR2101010,
5967         DRM_FORMAT_ARGB2101010,
5968         DRM_FORMAT_ABGR2101010,
5969         DRM_FORMAT_XBGR8888,
5970         DRM_FORMAT_ABGR8888,
5971         DRM_FORMAT_RGB565,
5972 };
5973
5974 static const uint32_t overlay_formats[] = {
5975         DRM_FORMAT_XRGB8888,
5976         DRM_FORMAT_ARGB8888,
5977         DRM_FORMAT_RGBA8888,
5978         DRM_FORMAT_XBGR8888,
5979         DRM_FORMAT_ABGR8888,
5980         DRM_FORMAT_RGB565
5981 };
5982
5983 static const u32 cursor_formats[] = {
5984         DRM_FORMAT_ARGB8888
5985 };
5986
5987 static int get_plane_formats(const struct drm_plane *plane,
5988                              const struct dc_plane_cap *plane_cap,
5989                              uint32_t *formats, int max_formats)
5990 {
5991         int i, num_formats = 0;
5992
5993         /*
5994          * TODO: Query support for each group of formats directly from
5995          * DC plane caps. This will require adding more formats to the
5996          * caps list.
5997          */
5998
5999         switch (plane->type) {
6000         case DRM_PLANE_TYPE_PRIMARY:
6001                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6002                         if (num_formats >= max_formats)
6003                                 break;
6004
6005                         formats[num_formats++] = rgb_formats[i];
6006                 }
6007
6008                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6009                         formats[num_formats++] = DRM_FORMAT_NV12;
6010                 if (plane_cap && plane_cap->pixel_format_support.p010)
6011                         formats[num_formats++] = DRM_FORMAT_P010;
6012                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6013                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6014                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6015                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6016                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6017                 }
6018                 break;
6019
6020         case DRM_PLANE_TYPE_OVERLAY:
6021                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6022                         if (num_formats >= max_formats)
6023                                 break;
6024
6025                         formats[num_formats++] = overlay_formats[i];
6026                 }
6027                 break;
6028
6029         case DRM_PLANE_TYPE_CURSOR:
6030                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6031                         if (num_formats >= max_formats)
6032                                 break;
6033
6034                         formats[num_formats++] = cursor_formats[i];
6035                 }
6036                 break;
6037         }
6038
6039         return num_formats;
6040 }
6041
6042 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6043                                 struct drm_plane *plane,
6044                                 unsigned long possible_crtcs,
6045                                 const struct dc_plane_cap *plane_cap)
6046 {
6047         uint32_t formats[32];
6048         int num_formats;
6049         int res = -EPERM;
6050         unsigned int supported_rotations;
6051
6052         num_formats = get_plane_formats(plane, plane_cap, formats,
6053                                         ARRAY_SIZE(formats));
6054
6055         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6056                                        &dm_plane_funcs, formats, num_formats,
6057                                        NULL, plane->type, NULL);
6058         if (res)
6059                 return res;
6060
6061         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6062             plane_cap && plane_cap->per_pixel_alpha) {
6063                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6064                                           BIT(DRM_MODE_BLEND_PREMULTI);
6065
6066                 drm_plane_create_alpha_property(plane);
6067                 drm_plane_create_blend_mode_property(plane, blend_caps);
6068         }
6069
6070         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6071             plane_cap &&
6072             (plane_cap->pixel_format_support.nv12 ||
6073              plane_cap->pixel_format_support.p010)) {
6074                 /* This only affects YUV formats. */
6075                 drm_plane_create_color_properties(
6076                         plane,
6077                         BIT(DRM_COLOR_YCBCR_BT601) |
6078                         BIT(DRM_COLOR_YCBCR_BT709) |
6079                         BIT(DRM_COLOR_YCBCR_BT2020),
6080                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6081                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6082                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6083         }
6084
6085         supported_rotations =
6086                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6087                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6088
6089         if (dm->adev->asic_type >= CHIP_BONAIRE)
6090                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6091                                                    supported_rotations);
6092
6093         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6094
6095         /* Create (reset) the plane state */
6096         if (plane->funcs->reset)
6097                 plane->funcs->reset(plane);
6098
6099         return 0;
6100 }
6101
6102 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6103                                struct drm_plane *plane,
6104                                uint32_t crtc_index)
6105 {
6106         struct amdgpu_crtc *acrtc = NULL;
6107         struct drm_plane *cursor_plane;
6108
6109         int res = -ENOMEM;
6110
6111         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6112         if (!cursor_plane)
6113                 goto fail;
6114
6115         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6116         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
6117
6118         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6119         if (!acrtc)
6120                 goto fail;
6121
6122         res = drm_crtc_init_with_planes(
6123                         dm->ddev,
6124                         &acrtc->base,
6125                         plane,
6126                         cursor_plane,
6127                         &amdgpu_dm_crtc_funcs, NULL);
6128
6129         if (res)
6130                 goto fail;
6131
6132         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6133
6134         /* Create (reset) the CRTC state */
6135         if (acrtc->base.funcs->reset)
6136                 acrtc->base.funcs->reset(&acrtc->base);
6137
6138         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6139         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6140
6141         acrtc->crtc_id = crtc_index;
6142         acrtc->base.enabled = false;
6143         acrtc->otg_inst = -1;
6144
6145         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6146         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6147                                    true, MAX_COLOR_LUT_ENTRIES);
6148         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6149
6150         return 0;
6151
6152 fail:
6153         kfree(acrtc);
6154         kfree(cursor_plane);
6155         return res;
6156 }
6157
6158
6159 static int to_drm_connector_type(enum signal_type st)
6160 {
6161         switch (st) {
6162         case SIGNAL_TYPE_HDMI_TYPE_A:
6163                 return DRM_MODE_CONNECTOR_HDMIA;
6164         case SIGNAL_TYPE_EDP:
6165                 return DRM_MODE_CONNECTOR_eDP;
6166         case SIGNAL_TYPE_LVDS:
6167                 return DRM_MODE_CONNECTOR_LVDS;
6168         case SIGNAL_TYPE_RGB:
6169                 return DRM_MODE_CONNECTOR_VGA;
6170         case SIGNAL_TYPE_DISPLAY_PORT:
6171         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6172                 return DRM_MODE_CONNECTOR_DisplayPort;
6173         case SIGNAL_TYPE_DVI_DUAL_LINK:
6174         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6175                 return DRM_MODE_CONNECTOR_DVID;
6176         case SIGNAL_TYPE_VIRTUAL:
6177                 return DRM_MODE_CONNECTOR_VIRTUAL;
6178
6179         default:
6180                 return DRM_MODE_CONNECTOR_Unknown;
6181         }
6182 }
6183
6184 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6185 {
6186         struct drm_encoder *encoder;
6187
6188         /* There is only one encoder per connector */
6189         drm_connector_for_each_possible_encoder(connector, encoder)
6190                 return encoder;
6191
6192         return NULL;
6193 }
6194
6195 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6196 {
6197         struct drm_encoder *encoder;
6198         struct amdgpu_encoder *amdgpu_encoder;
6199
6200         encoder = amdgpu_dm_connector_to_encoder(connector);
6201
6202         if (encoder == NULL)
6203                 return;
6204
6205         amdgpu_encoder = to_amdgpu_encoder(encoder);
6206
6207         amdgpu_encoder->native_mode.clock = 0;
6208
6209         if (!list_empty(&connector->probed_modes)) {
6210                 struct drm_display_mode *preferred_mode = NULL;
6211
6212                 list_for_each_entry(preferred_mode,
6213                                     &connector->probed_modes,
6214                                     head) {
6215                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6216                                 amdgpu_encoder->native_mode = *preferred_mode;
6217
6218                         break;
6219                 }
6220
6221         }
6222 }
6223
6224 static struct drm_display_mode *
6225 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6226                              char *name,
6227                              int hdisplay, int vdisplay)
6228 {
6229         struct drm_device *dev = encoder->dev;
6230         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6231         struct drm_display_mode *mode = NULL;
6232         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6233
6234         mode = drm_mode_duplicate(dev, native_mode);
6235
6236         if (mode == NULL)
6237                 return NULL;
6238
6239         mode->hdisplay = hdisplay;
6240         mode->vdisplay = vdisplay;
6241         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6242         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6243
6244         return mode;
6245
6246 }
6247
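     /*
      * Offer a set of common modes no larger than the panel's native mode, so
      * users can still pick a lower resolution when the EDID only reports the
      * native timing. Modes already in the probed list are skipped.
      */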
6248 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6249                                                  struct drm_connector *connector)
6250 {
6251         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6252         struct drm_display_mode *mode = NULL;
6253         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6254         struct amdgpu_dm_connector *amdgpu_dm_connector =
6255                                 to_amdgpu_dm_connector(connector);
6256         int i;
6257         int n;
6258         struct mode_size {
6259                 char name[DRM_DISPLAY_MODE_LEN];
6260                 int w;
6261                 int h;
6262         } common_modes[] = {
6263                 {  "640x480",  640,  480},
6264                 {  "800x600",  800,  600},
6265                 { "1024x768", 1024,  768},
6266                 { "1280x720", 1280,  720},
6267                 { "1280x800", 1280,  800},
6268                 {"1280x1024", 1280, 1024},
6269                 { "1440x900", 1440,  900},
6270                 {"1680x1050", 1680, 1050},
6271                 {"1600x1200", 1600, 1200},
6272                 {"1920x1080", 1920, 1080},
6273                 {"1920x1200", 1920, 1200}
6274         };
6275
6276         n = ARRAY_SIZE(common_modes);
6277
6278         for (i = 0; i < n; i++) {
6279                 struct drm_display_mode *curmode = NULL;
6280                 bool mode_existed = false;
6281
6282                 if (common_modes[i].w > native_mode->hdisplay ||
6283                     common_modes[i].h > native_mode->vdisplay ||
6284                    (common_modes[i].w == native_mode->hdisplay &&
6285                     common_modes[i].h == native_mode->vdisplay))
6286                         continue;
6287
6288                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6289                         if (common_modes[i].w == curmode->hdisplay &&
6290                             common_modes[i].h == curmode->vdisplay) {
6291                                 mode_existed = true;
6292                                 break;
6293                         }
6294                 }
6295
6296                 if (mode_existed)
6297                         continue;
6298
6299                 mode = amdgpu_dm_create_common_mode(encoder,
6300                                 common_modes[i].name, common_modes[i].w,
6301                                 common_modes[i].h);
                     if (!mode)
                             continue;
6302                 drm_mode_probed_add(connector, mode);
6303                 amdgpu_dm_connector->num_modes++;
6304         }
6305 }
6306
6307 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6308                                               struct edid *edid)
6309 {
6310         struct amdgpu_dm_connector *amdgpu_dm_connector =
6311                         to_amdgpu_dm_connector(connector);
6312
6313         if (edid) {
6314                 /* empty probed_modes */
6315                 INIT_LIST_HEAD(&connector->probed_modes);
6316                 amdgpu_dm_connector->num_modes =
6317                                 drm_add_edid_modes(connector, edid);
6318
6319                 /* Sort the probed modes before calling
6320                  * amdgpu_dm_get_native_mode(), since an EDID can have
6321                  * more than one preferred mode. Modes later in the
6322                  * probed list can be of a higher, preferred resolution:
6323                  * for example, a 3840x2160 preferred timing in the base
6324                  * EDID and a 4096x2160 preferred resolution in a DID
6325                  * extension block later.
6326                  */
6327                 drm_mode_sort(&connector->probed_modes);
6328                 amdgpu_dm_get_native_mode(connector);
6329         } else {
6330                 amdgpu_dm_connector->num_modes = 0;
6331         }
6332 }
6333
6334 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6335 {
6336         struct amdgpu_dm_connector *amdgpu_dm_connector =
6337                         to_amdgpu_dm_connector(connector);
6338         struct drm_encoder *encoder;
6339         struct edid *edid = amdgpu_dm_connector->edid;
6340
6341         encoder = amdgpu_dm_connector_to_encoder(connector);
6342
6343         if (!edid || !drm_edid_is_valid(edid)) {
6344                 amdgpu_dm_connector->num_modes =
6345                                 drm_add_modes_noedid(connector, 640, 480);
6346         } else {
6347                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6348                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6349         }
6350         amdgpu_dm_fbc_init(connector);
6351
6352         return amdgpu_dm_connector->num_modes;
6353 }
6354
6355 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6356                                      struct amdgpu_dm_connector *aconnector,
6357                                      int connector_type,
6358                                      struct dc_link *link,
6359                                      int link_index)
6360 {
6361         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6362
6363         /*
6364          * Some of the properties below require access to state, like bpc.
6365          * Allocate some default initial connector state with our reset helper.
6366          */
6367         if (aconnector->base.funcs->reset)
6368                 aconnector->base.funcs->reset(&aconnector->base);
6369
6370         aconnector->connector_id = link_index;
6371         aconnector->dc_link = link;
6372         aconnector->base.interlace_allowed = false;
6373         aconnector->base.doublescan_allowed = false;
6374         aconnector->base.stereo_allowed = false;
6375         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6376         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6377         aconnector->audio_inst = -1;
6378         mutex_init(&aconnector->hpd_lock);
6379
6380         /*
6381          * Configure HPD hot-plug support: connector->polled defaults to 0,
6382          * which means HPD hot plug is not supported.
6383          */
6384         switch (connector_type) {
6385         case DRM_MODE_CONNECTOR_HDMIA:
6386                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6387                 aconnector->base.ycbcr_420_allowed =
6388                         link->link_enc->features.hdmi_ycbcr420_supported;
6389                 break;
6390         case DRM_MODE_CONNECTOR_DisplayPort:
6391                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6392                 aconnector->base.ycbcr_420_allowed =
6393                         link->link_enc->features.dp_ycbcr420_supported;
6394                 break;
6395         case DRM_MODE_CONNECTOR_DVID:
6396                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6397                 break;
6398         default:
6399                 break;
6400         }
6401
6402         drm_object_attach_property(&aconnector->base.base,
6403                                 dm->ddev->mode_config.scaling_mode_property,
6404                                 DRM_MODE_SCALE_NONE);
6405
6406         drm_object_attach_property(&aconnector->base.base,
6407                                 adev->mode_info.underscan_property,
6408                                 UNDERSCAN_OFF);
6409         drm_object_attach_property(&aconnector->base.base,
6410                                 adev->mode_info.underscan_hborder_property,
6411                                 0);
6412         drm_object_attach_property(&aconnector->base.base,
6413                                 adev->mode_info.underscan_vborder_property,
6414                                 0);
6415
6416         if (!aconnector->mst_port)
6417                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6418
6419         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6420         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6421         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6422
6423         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6424             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6425                 drm_object_attach_property(&aconnector->base.base,
6426                                 adev->mode_info.abm_level_property, 0);
6427         }
6428
6429         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6430             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6431             connector_type == DRM_MODE_CONNECTOR_eDP) {
6432                 drm_object_attach_property(
6433                         &aconnector->base.base,
6434                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6435
6436                 if (!aconnector->mst_port)
6437                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6438
6439 #ifdef CONFIG_DRM_AMD_DC_HDCP
6440                 if (adev->dm.hdcp_workqueue)
6441                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6442 #endif
6443         }
6444 }
6445
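     /*
      * i2c_algorithm hook: translate an array of Linux i2c_msg into a single
      * DC i2c_command and submit it over this connector's DDC channel.
      * Returns the number of messages transferred on success, -EIO otherwise.
      */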
6446 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6447                               struct i2c_msg *msgs, int num)
6448 {
6449         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6450         struct ddc_service *ddc_service = i2c->ddc_service;
6451         struct i2c_command cmd;
6452         int i;
6453         int result = -EIO;
6454
6455         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6456
6457         if (!cmd.payloads)
6458                 return result;
6459
6460         cmd.number_of_payloads = num;
6461         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6462         cmd.speed = 100;
6463
6464         for (i = 0; i < num; i++) {
6465                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6466                 cmd.payloads[i].address = msgs[i].addr;
6467                 cmd.payloads[i].length = msgs[i].len;
6468                 cmd.payloads[i].data = msgs[i].buf;
6469         }
6470
6471         if (dc_submit_i2c(
6472                         ddc_service->ctx->dc,
6473                         ddc_service->ddc_pin->hw_info.ddc_channel,
6474                         &cmd))
6475                 result = num;
6476
6477         kfree(cmd.payloads);
6478         return result;
6479 }
6480
6481 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6482 {
6483         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6484 }
6485
6486 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6487         .master_xfer = amdgpu_dm_i2c_xfer,
6488         .functionality = amdgpu_dm_i2c_func,
6489 };
6490
6491 static struct amdgpu_i2c_adapter *
6492 create_i2c(struct ddc_service *ddc_service,
6493            int link_index,
6494            int *res)
6495 {
6496         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6497         struct amdgpu_i2c_adapter *i2c;
6498
6499         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6500         if (!i2c)
6501                 return NULL;
6502         i2c->base.owner = THIS_MODULE;
6503         i2c->base.class = I2C_CLASS_DDC;
6504         i2c->base.dev.parent = &adev->pdev->dev;
6505         i2c->base.algo = &amdgpu_dm_i2c_algo;
6506         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6507         i2c_set_adapdata(&i2c->base, i2c);
6508         i2c->ddc_service = ddc_service;
6509         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6510
6511         return i2c;
6512 }
6513
6514
6515 /*
6516  * Note: this function assumes that dc_link_detect() was called for the
6517  * dc_link which will be represented by this aconnector.
6518  */
6519 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6520                                     struct amdgpu_dm_connector *aconnector,
6521                                     uint32_t link_index,
6522                                     struct amdgpu_encoder *aencoder)
6523 {
6524         int res = 0;
6525         int connector_type;
6526         struct dc *dc = dm->dc;
6527         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6528         struct amdgpu_i2c_adapter *i2c;
6529
6530         link->priv = aconnector;
6531
6532         DRM_DEBUG_DRIVER("%s()\n", __func__);
6533
6534         i2c = create_i2c(link->ddc, link->link_index, &res);
6535         if (!i2c) {
6536                 DRM_ERROR("Failed to create i2c adapter data\n");
6537                 return -ENOMEM;
6538         }
6539
6540         aconnector->i2c = i2c;
6541         res = i2c_add_adapter(&i2c->base);
6542
6543         if (res) {
6544                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6545                 goto out_free;
6546         }
6547
6548         connector_type = to_drm_connector_type(link->connector_signal);
6549
6550         res = drm_connector_init_with_ddc(
6551                         dm->ddev,
6552                         &aconnector->base,
6553                         &amdgpu_dm_connector_funcs,
6554                         connector_type,
6555                         &i2c->base);
6556
6557         if (res) {
6558                 DRM_ERROR("connector_init failed\n");
6559                 aconnector->connector_id = -1;
6560                 goto out_free;
6561         }
6562
6563         drm_connector_helper_add(
6564                         &aconnector->base,
6565                         &amdgpu_dm_connector_helper_funcs);
6566
6567         amdgpu_dm_connector_init_helper(
6568                 dm,
6569                 aconnector,
6570                 connector_type,
6571                 link,
6572                 link_index);
6573
6574         drm_connector_attach_encoder(
6575                 &aconnector->base, &aencoder->base);
6576
6577         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6578                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6579                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6580
6581 out_free:
6582         if (res) {
6583                 kfree(i2c);
6584                 aconnector->i2c = NULL;
6585         }
6586         return res;
6587 }
6588
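     /*
      * Build the possible_crtcs mask for an encoder: one bit per CRTC, i.e.
      * the switch below is equivalent to GENMASK(num_crtc - 1, 0), capped at
      * six CRTCs.
      */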
6589 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6590 {
6591         switch (adev->mode_info.num_crtc) {
6592         case 1:
6593                 return 0x1;
6594         case 2:
6595                 return 0x3;
6596         case 3:
6597                 return 0x7;
6598         case 4:
6599                 return 0xf;
6600         case 5:
6601                 return 0x1f;
6602         case 6:
6603         default:
6604                 return 0x3f;
6605         }
6606 }
6607
6608 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6609                                   struct amdgpu_encoder *aencoder,
6610                                   uint32_t link_index)
6611 {
6612         struct amdgpu_device *adev = drm_to_adev(dev);
6613
6614         int res = drm_encoder_init(dev,
6615                                    &aencoder->base,
6616                                    &amdgpu_dm_encoder_funcs,
6617                                    DRM_MODE_ENCODER_TMDS,
6618                                    NULL);
6619
6620         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6621
6622         if (!res)
6623                 aencoder->encoder_id = link_index;
6624         else
6625                 aencoder->encoder_id = -1;
6626
6627         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6628
6629         return res;
6630 }
6631
6632 static void manage_dm_interrupts(struct amdgpu_device *adev,
6633                                  struct amdgpu_crtc *acrtc,
6634                                  bool enable)
6635 {
6636         /*
6637          * We have no guarantee that the frontend index maps to the same
6638          * backend index - some even map to more than one.
6639          *
6640          * TODO: Use a different interrupt or check DC itself for the mapping.
6641          */
6642         int irq_type =
6643                 amdgpu_display_crtc_idx_to_irq_type(
6644                         adev,
6645                         acrtc->crtc_id);
6646
6647         if (enable) {
6648                 drm_crtc_vblank_on(&acrtc->base);
6649                 amdgpu_irq_get(
6650                         adev,
6651                         &adev->pageflip_irq,
6652                         irq_type);
6653         } else {
6654
6655                 amdgpu_irq_put(
6656                         adev,
6657                         &adev->pageflip_irq,
6658                         irq_type);
6659                 drm_crtc_vblank_off(&acrtc->base);
6660         }
6661 }
6662
6663 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6664                                       struct amdgpu_crtc *acrtc)
6665 {
6666         int irq_type =
6667                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6668
6669         /*
6670          * Re-read the current state of the IRQ and force-reapply
6671          * the setting to hardware.
6672          */
6673         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6674 }
6675
6676 static bool
6677 is_scaling_state_different(const struct dm_connector_state *dm_state,
6678                            const struct dm_connector_state *old_dm_state)
6679 {
6680         if (dm_state->scaling != old_dm_state->scaling)
6681                 return true;
6682         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6683                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6684                         return true;
6685         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6686                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6687                         return true;
6688         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6689                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6690                 return true;
6691         return false;
6692 }
6693
6694 #ifdef CONFIG_DRM_AMD_DC_HDCP
6695 static bool is_content_protection_different(struct drm_connector_state *state,
6696                                             const struct drm_connector_state *old_state,
6697                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6698 {
6699         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6700
6701         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6702             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6703                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6704                 return true;
6705         }
6706
6707         /* CP is being re-enabled, ignore this */
6708         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6709             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6710                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6711                 return false;
6712         }
6713
6714         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6715         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6716             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6717                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6718
6719         /* Check that something is actually connected and enabled; otherwise we
6720          * would start HDCP with nothing driving it (hot-plug, headless S3, DPMS).
6721          */
6722         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6723             aconnector->dc_sink != NULL)
6724                 return true;
6725
6726         if (old_state->content_protection == state->content_protection)
6727                 return false;
6728
6729         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6730                 return true;
6731
6732         return false;
6733 }
6734
6735 #endif
6736 static void remove_stream(struct amdgpu_device *adev,
6737                           struct amdgpu_crtc *acrtc,
6738                           struct dc_stream_state *stream)
6739 {
6740         /* this is the update mode case */
6741
6742         acrtc->otg_inst = -1;
6743         acrtc->enabled = false;
6744 }
6745
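     /*
      * Translate the DRM cursor position into a DC cursor position. When the
      * cursor hangs off the top or left edge (negative crtc_x/crtc_y), the
      * hardware position is clamped to zero and the overflow is folded into
      * the hotspot instead: e.g. crtc_x = -10 yields x = 0, x_hotspot = 10,
      * and DC shifts the cursor source rect accordingly (translate_by_source).
      */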
6746 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6747                                struct dc_cursor_position *position)
6748 {
6749         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6750         int x, y;
6751         int xorigin = 0, yorigin = 0;
6752
6753         position->enable = false;
6754         position->x = 0;
6755         position->y = 0;
6756
6757         if (!crtc || !plane->state->fb)
6758                 return 0;
6759
6760         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6761             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6762                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6763                           __func__,
6764                           plane->state->crtc_w,
6765                           plane->state->crtc_h);
6766                 return -EINVAL;
6767         }
6768
6769         x = plane->state->crtc_x;
6770         y = plane->state->crtc_y;
6771
6772         if (x <= -amdgpu_crtc->max_cursor_width ||
6773             y <= -amdgpu_crtc->max_cursor_height)
6774                 return 0;
6775
6776         if (x < 0) {
6777                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6778                 x = 0;
6779         }
6780         if (y < 0) {
6781                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6782                 y = 0;
6783         }
6784         position->enable = true;
6785         position->translate_by_source = true;
6786         position->x = x;
6787         position->y = y;
6788         position->x_hotspot = xorigin;
6789         position->y_hotspot = yorigin;
6790
6791         return 0;
6792 }
6793
6794 static void handle_cursor_update(struct drm_plane *plane,
6795                                  struct drm_plane_state *old_plane_state)
6796 {
6797         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6798         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6799         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6800         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6801         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6802         uint64_t address = afb ? afb->address : 0;
6803         struct dc_cursor_position position;
6804         struct dc_cursor_attributes attributes;
6805         int ret;
6806
6807         if (!plane->state->fb && !old_plane_state->fb)
6808                 return;
6809
6810         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6811                          __func__,
6812                          amdgpu_crtc->crtc_id,
6813                          plane->state->crtc_w,
6814                          plane->state->crtc_h);
6815
6816         ret = get_cursor_position(plane, crtc, &position);
6817         if (ret)
6818                 return;
6819
6820         if (!position.enable) {
6821                 /* turn off cursor */
6822                 if (crtc_state && crtc_state->stream) {
6823                         mutex_lock(&adev->dm.dc_lock);
6824                         dc_stream_set_cursor_position(crtc_state->stream,
6825                                                       &position);
6826                         mutex_unlock(&adev->dm.dc_lock);
6827                 }
6828                 return;
6829         }
6830
6831         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6832         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6833
6834         memset(&attributes, 0, sizeof(attributes));
6835         attributes.address.high_part = upper_32_bits(address);
6836         attributes.address.low_part  = lower_32_bits(address);
6837         attributes.width             = plane->state->crtc_w;
6838         attributes.height            = plane->state->crtc_h;
6839         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6840         attributes.rotation_angle    = 0;
6841         attributes.attribute_flags.value = 0;
6842
6843         attributes.pitch = attributes.width;
6844
6845         if (crtc_state->stream) {
6846                 mutex_lock(&adev->dm.dc_lock);
6847                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6848                                                          &attributes))
6849                         DRM_ERROR("DC failed to set cursor attributes\n");
6850
6851                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6852                                                    &position))
6853                         DRM_ERROR("DC failed to set cursor position\n");
6854                 mutex_unlock(&adev->dm.dc_lock);
6855         }
6856 }
6857
6858 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6859 {
6860
6861         assert_spin_locked(&acrtc->base.dev->event_lock);
6862         WARN_ON(acrtc->event);
6863
6864         acrtc->event = acrtc->base.state->event;
6865
6866         /* Set the flip status */
6867         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6868
6869         /* Mark this event as consumed */
6870         acrtc->base.state->event = NULL;
6871
6872         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6873                                                  acrtc->crtc_id);
6874 }
6875
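     /*
      * Recompute the VRR (FreeSync) parameters for a flip and rebuild the VRR
      * infopacket sent to the sink. Runs under event_lock since the interrupt
      * handlers also access acrtc->dm_irq_params.
      */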
6876 static void update_freesync_state_on_stream(
6877         struct amdgpu_display_manager *dm,
6878         struct dm_crtc_state *new_crtc_state,
6879         struct dc_stream_state *new_stream,
6880         struct dc_plane_state *surface,
6881         u32 flip_timestamp_in_us)
6882 {
6883         struct mod_vrr_params vrr_params;
6884         struct dc_info_packet vrr_infopacket = {0};
6885         struct amdgpu_device *adev = dm->adev;
6886         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6887         unsigned long flags;
6888
6889         if (!new_stream)
6890                 return;
6891
6892         /*
6893          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6894          * For now it's sufficient to just guard against these conditions.
6895          */
6896
6897         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6898                 return;
6899
6900         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6901         vrr_params = acrtc->dm_irq_params.vrr_params;
6902
6903         if (surface) {
6904                 mod_freesync_handle_preflip(
6905                         dm->freesync_module,
6906                         surface,
6907                         new_stream,
6908                         flip_timestamp_in_us,
6909                         &vrr_params);
6910
6911                 if (adev->family < AMDGPU_FAMILY_AI &&
6912                     amdgpu_dm_vrr_active(new_crtc_state)) {
6913                         mod_freesync_handle_v_update(dm->freesync_module,
6914                                                      new_stream, &vrr_params);
6915
6916                         /* Need to call this before the frame ends. */
6917                         dc_stream_adjust_vmin_vmax(dm->dc,
6918                                                    new_crtc_state->stream,
6919                                                    &vrr_params.adjust);
6920                 }
6921         }
6922
6923         mod_freesync_build_vrr_infopacket(
6924                 dm->freesync_module,
6925                 new_stream,
6926                 &vrr_params,
6927                 PACKET_TYPE_VRR,
6928                 TRANSFER_FUNC_UNKNOWN,
6929                 &vrr_infopacket);
6930
6931         new_crtc_state->freesync_timing_changed |=
6932                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6933                         &vrr_params.adjust,
6934                         sizeof(vrr_params.adjust)) != 0);
6935
6936         new_crtc_state->freesync_vrr_info_changed |=
6937                 (memcmp(&new_crtc_state->vrr_infopacket,
6938                         &vrr_infopacket,
6939                         sizeof(vrr_infopacket)) != 0);
6940
6941         acrtc->dm_irq_params.vrr_params = vrr_params;
6942         new_crtc_state->vrr_infopacket = vrr_infopacket;
6943
6944         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
6945         new_stream->vrr_infopacket = vrr_infopacket;
6946
6947         if (new_crtc_state->freesync_vrr_info_changed)
6948                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6949                               new_crtc_state->base.crtc->base.id,
6950                               (int)new_crtc_state->base.vrr_enabled,
6951                               (int)vrr_params.state);
6952
6953         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6954 }
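
/*
 * Note (illustrative): change detection above is purely memcmp-based,
 * e.g. for the timing adjustment:
 *
 *   changed |= memcmp(&old_params.adjust, &new_params.adjust,
 *                     sizeof(new_params.adjust)) != 0;
 *
 * so any modified field of the VRR adjustment marks the stream for a
 * timing update on this flip.
 */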
6955
6956 static void update_stream_irq_parameters(
6957         struct amdgpu_display_manager *dm,
6958         struct dm_crtc_state *new_crtc_state)
6959 {
6960         struct dc_stream_state *new_stream = new_crtc_state->stream;
6961         struct mod_vrr_params vrr_params;
6962         struct mod_freesync_config config = new_crtc_state->freesync_config;
6963         struct amdgpu_device *adev = dm->adev;
6964         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6965         unsigned long flags;
6966
6967         if (!new_stream)
6968                 return;
6969
6970         /*
6971          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6972          * For now it's sufficient to just guard against these conditions.
6973          */
6974         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6975                 return;
6976
6977         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6978         vrr_params = acrtc->dm_irq_params.vrr_params;
6979
6980         if (new_crtc_state->vrr_supported &&
6981             config.min_refresh_in_uhz &&
6982             config.max_refresh_in_uhz) {
6983                 config.state = new_crtc_state->base.vrr_enabled ?
6984                         VRR_STATE_ACTIVE_VARIABLE :
6985                         VRR_STATE_INACTIVE;
6986         } else {
6987                 config.state = VRR_STATE_UNSUPPORTED;
6988         }
6989
6990         mod_freesync_build_vrr_params(dm->freesync_module,
6991                                       new_stream,
6992                                       &config, &vrr_params);
6993
6994         new_crtc_state->freesync_timing_changed |=
6995                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6996                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
6997
6998         new_crtc_state->freesync_config = config;
6999         /* Copy state for access from DM IRQ handler */
7000         acrtc->dm_irq_params.freesync_config = config;
7001         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7002         acrtc->dm_irq_params.vrr_params = vrr_params;
7003         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7004 }
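
/*
 * Note (illustrative): the copies made under event_lock above are what
 * the vblank/vupdate IRQ handlers read through acrtc->dm_irq_params, so
 * taking the lock guarantees the handlers never observe a half-updated
 * freesync_config/vrr_params pair.
 */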
7005
7006 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7007                                             struct dm_crtc_state *new_state)
7008 {
7009         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7010         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7011
7012         if (!old_vrr_active && new_vrr_active) {
7013                 /* Transition VRR inactive -> active:
7014                  * While VRR is active, we must not disable the vblank irq, as
7015                  * a reenable after a disable can compute bogus vblank/pflip
7016                  * timestamps if it happens inside the display front porch.
7017                  *
7018                  * We also need vupdate irq for the actual core vblank handling
7019                  * at end of vblank.
7020                  */
7021                 dm_set_vupdate_irq(new_state->base.crtc, true);
7022                 drm_crtc_vblank_get(new_state->base.crtc);
7023                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7024                                  __func__, new_state->base.crtc->base.id);
7025         } else if (old_vrr_active && !new_vrr_active) {
7026                 /* Transition VRR active -> inactive:
7027                  * Allow vblank irq disable again for fixed refresh rate.
7028                  */
7029                 dm_set_vupdate_irq(new_state->base.crtc, false);
7030                 drm_crtc_vblank_put(new_state->base.crtc);
7031                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7032                                  __func__, new_state->base.crtc->base.id);
7033         }
7034 }
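
/*
 * Sketch of the invariant maintained above (illustrative only): each
 * VRR off->on transition takes exactly one vblank reference, and the
 * matching on->off transition drops it, so references cannot leak:
 *
 *   dm_set_vupdate_irq(crtc, true);
 *   drm_crtc_vblank_get(crtc);   // VRR off -> on
 *   ...                          // vblank irq stays enabled while active
 *   dm_set_vupdate_irq(crtc, false);
 *   drm_crtc_vblank_put(crtc);   // VRR on -> off
 */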
7035
7036 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7037 {
7038         struct drm_plane *plane;
7039         struct drm_plane_state *old_plane_state, *new_plane_state;
7040         int i;
7041
7042         /*
7043          * TODO: Make this per-stream so we don't issue redundant updates for
7044          * commits with multiple streams.
7045          */
7046         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7047                                        new_plane_state, i)
7048                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7049                         handle_cursor_update(plane, old_plane_state);
7050 }
7051
7052 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7053                                     struct dc_state *dc_state,
7054                                     struct drm_device *dev,
7055                                     struct amdgpu_display_manager *dm,
7056                                     struct drm_crtc *pcrtc,
7057                                     bool wait_for_vblank)
7058 {
7059         uint32_t i;
7060         uint64_t timestamp_ns;
7061         struct drm_plane *plane;
7062         struct drm_plane_state *old_plane_state, *new_plane_state;
7063         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7064         struct drm_crtc_state *new_pcrtc_state =
7065                         drm_atomic_get_new_crtc_state(state, pcrtc);
7066         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7067         struct dm_crtc_state *dm_old_crtc_state =
7068                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7069         int planes_count = 0, vpos, hpos;
7070         long r;
7071         unsigned long flags;
7072         struct amdgpu_bo *abo;
7073         uint32_t target_vblank, last_flip_vblank;
7074         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7075         bool pflip_present = false;
7076         struct {
7077                 struct dc_surface_update surface_updates[MAX_SURFACES];
7078                 struct dc_plane_info plane_infos[MAX_SURFACES];
7079                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7080                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7081                 struct dc_stream_update stream_update;
7082         } *bundle;
7083
7084         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7085
7086         if (!bundle) {
7087                 dm_error("Failed to allocate update bundle\n");
7088                 goto cleanup;
7089         }
7090
7091         /*
7092          * Disable the cursor first if we're disabling all the planes.
7093          * It'll remain on the screen after the planes are re-enabled
7094          * if we don't.
7095          */
7096         if (acrtc_state->active_planes == 0)
7097                 amdgpu_dm_commit_cursors(state);
7098
7099         /* update planes when needed */
7100         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7101                 struct drm_crtc *crtc = new_plane_state->crtc;
7102                 struct drm_crtc_state *new_crtc_state;
7103                 struct drm_framebuffer *fb = new_plane_state->fb;
7104                 bool plane_needs_flip;
7105                 struct dc_plane_state *dc_plane;
7106                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7107
7108                 /* Cursor plane is handled after stream updates */
7109                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7110                         continue;
7111
7112                 if (!fb || !crtc || pcrtc != crtc)
7113                         continue;
7114
7115                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7116                 if (!new_crtc_state->active)
7117                         continue;
7118
7119                 dc_plane = dm_new_plane_state->dc_state;
7120
7121                 bundle->surface_updates[planes_count].surface = dc_plane;
7122                 if (new_pcrtc_state->color_mgmt_changed) {
7123                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7124                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7125                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7126                 }
7127
7128                 fill_dc_scaling_info(new_plane_state,
7129                                      &bundle->scaling_infos[planes_count]);
7130
7131                 bundle->surface_updates[planes_count].scaling_info =
7132                         &bundle->scaling_infos[planes_count];
7133
7134                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7135
7136                 pflip_present = pflip_present || plane_needs_flip;
7137
7138                 if (!plane_needs_flip) {
7139                         planes_count += 1;
7140                         continue;
7141                 }
7142
7143                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7144
7145                 /*
7146                  * Wait for all fences on this FB. Use a limited wait to avoid
7147                  * deadlock during GPU reset, when the fence may never signal
7148                  * while we hold the reservation lock for the BO.
7149                  */
7150                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7151                                                         false,
7152                                                         msecs_to_jiffies(5000));
7153                 if (unlikely(r <= 0))
7154                         DRM_ERROR("Waiting for fences timed out!");
7155
7156                 fill_dc_plane_info_and_addr(
7157                         dm->adev, new_plane_state,
7158                         dm_new_plane_state->tiling_flags,
7159                         &bundle->plane_infos[planes_count],
7160                         &bundle->flip_addrs[planes_count].address,
7161                         dm_new_plane_state->tmz_surface, false);
7162
7163                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7164                                  new_plane_state->plane->index,
7165                                  bundle->plane_infos[planes_count].dcc.enable);
7166
7167                 bundle->surface_updates[planes_count].plane_info =
7168                         &bundle->plane_infos[planes_count];
7169
7170                 /*
7171                  * Only allow immediate flips for fast updates that don't
7172                  * change FB pitch, DCC state, rotation or mirroring.
7173                  */
7174                 bundle->flip_addrs[planes_count].flip_immediate =
7175                         crtc->state->async_flip &&
7176                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7177
7178                 timestamp_ns = ktime_get_ns();
7179                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7180                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7181                 bundle->surface_updates[planes_count].surface = dc_plane;
7182
7183                 if (!bundle->surface_updates[planes_count].surface) {
7184                         DRM_ERROR("No surface for CRTC: id=%d\n",
7185                                         acrtc_attach->crtc_id);
7186                         continue;
7187                 }
7188
7189                 if (plane == pcrtc->primary)
7190                         update_freesync_state_on_stream(
7191                                 dm,
7192                                 acrtc_state,
7193                                 acrtc_state->stream,
7194                                 dc_plane,
7195                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7196
7197                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7198                                  __func__,
7199                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7200                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7201
7202                 planes_count += 1;
7203
7204         }
7205
7206         if (pflip_present) {
7207                 if (!vrr_active) {
7208                         /* Use old throttling in non-vrr fixed refresh rate mode
7209                          * to keep flip scheduling based on target vblank counts
7210                          * working in a backwards compatible way, e.g., for
7211                          * clients using the GLX_OML_sync_control extension or
7212                          * DRI3/Present extension with defined target_msc.
7213                          */
7214                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7215                 } else {
7216                         /*
7217                          * For variable refresh rate mode only:
7218                          * Get vblank of last completed flip to avoid > 1 vrr
7219                          * flips per video frame by use of throttling, but allow
7220                          * flip programming anywhere in the possibly large
7221                          * variable vrr vblank interval for fine-grained flip
7222                          * timing control and more opportunity to avoid stutter
7223                          * on late submission of flips.
7224                          */
7225                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7226                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7227                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7228                 }
7229
7230                 target_vblank = last_flip_vblank + wait_for_vblank;
7231
7232                 /*
7233                  * Wait until we're out of the vertical blank period before the one
7234                  * targeted by the flip
7235                  */
7236                 while ((acrtc_attach->enabled &&
7237                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7238                                                             0, &vpos, &hpos, NULL,
7239                                                             NULL, &pcrtc->hwmode)
7240                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7241                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7242                         (int)(target_vblank -
7243                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7244                         usleep_range(1000, 1100);
7245                 }
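
                /*
                 * Worked example (illustrative): with wait_for_vblank
                 * == true and last_flip_vblank == 100, target_vblank is
                 * 101; the loop above then sleeps in ~1 ms steps for as
                 * long as scanout is inside a vblank period earlier than
                 * vblank 101.
                 */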
7246
7247                 /*
7248                  * Prepare the flip event for the pageflip interrupt to handle.
7249                  *
7250                  * This only works when we've already turned on the appropriate
7251                  * hardware blocks (e.g. HUBP), so in the transition from
7252                  * 0 -> n planes we have to skip the hardware-generated event
7253                  * and rely on sending it from software.
7254                  */
7255                 if (acrtc_attach->base.state->event &&
7256                     acrtc_state->active_planes > 0) {
7257                         drm_crtc_vblank_get(pcrtc);
7258
7259                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7260
7261                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7262                         prepare_flip_isr(acrtc_attach);
7263
7264                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7265                 }
7266
7267                 if (acrtc_state->stream) {
7268                         if (acrtc_state->freesync_vrr_info_changed)
7269                                 bundle->stream_update.vrr_infopacket =
7270                                         &acrtc_state->stream->vrr_infopacket;
7271                 }
7272         }
7273
7274         /* Update the planes if changed or disable if we don't have any. */
7275         if ((planes_count || acrtc_state->active_planes == 0) &&
7276                 acrtc_state->stream) {
7277                 bundle->stream_update.stream = acrtc_state->stream;
7278                 if (new_pcrtc_state->mode_changed) {
7279                         bundle->stream_update.src = acrtc_state->stream->src;
7280                         bundle->stream_update.dst = acrtc_state->stream->dst;
7281                 }
7282
7283                 if (new_pcrtc_state->color_mgmt_changed) {
7284                         /*
7285                          * TODO: This isn't fully correct since we've actually
7286                          * already modified the stream in place.
7287                          */
7288                         bundle->stream_update.gamut_remap =
7289                                 &acrtc_state->stream->gamut_remap_matrix;
7290                         bundle->stream_update.output_csc_transform =
7291                                 &acrtc_state->stream->csc_color_matrix;
7292                         bundle->stream_update.out_transfer_func =
7293                                 acrtc_state->stream->out_transfer_func;
7294                 }
7295
7296                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7297                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7298                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7299
7300                 /*
7301                  * If FreeSync state on the stream has changed then we need to
7302                  * re-adjust the min/max bounds now that DC doesn't handle this
7303                  * as part of commit.
7304                  */
7305                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7306                     amdgpu_dm_vrr_active(acrtc_state)) {
7307                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7308                         dc_stream_adjust_vmin_vmax(
7309                                 dm->dc, acrtc_state->stream,
7310                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7311                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7312                 }
7313                 mutex_lock(&dm->dc_lock);
7314                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7315                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7316                         amdgpu_dm_psr_disable(acrtc_state->stream);
7317
7318                 dc_commit_updates_for_stream(dm->dc,
7319                                                      bundle->surface_updates,
7320                                                      planes_count,
7321                                                      acrtc_state->stream,
7322                                                      &bundle->stream_update,
7323                                                      dc_state);
7324
7325                 /*
7326                  * Enable or disable the interrupts on the backend.
7327                  *
7328                  * Most pipes are put into power gating when unused.
7329                  *
7330                  * When a pipe is power gated we lose its interrupt
7331                  * enablement state, and it is not restored automatically.
7332                  *
7333                  * So we need to update the IRQ control state in hardware
7334                  * whenever the pipe turns on (since it could have been
7335                  * power gated) or off (since some pipes can't be power
7336                  * gated on some ASICs).
7337                  */
7338                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7339                         dm_update_pflip_irq_state(drm_to_adev(dev),
7340                                                   acrtc_attach);
7341
7342                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7343                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7344                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7345                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7346                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7347                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7348                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7349                         amdgpu_dm_psr_enable(acrtc_state->stream);
7350                 }
7351
7352                 mutex_unlock(&dm->dc_lock);
7353         }
7354
7355         /*
7356          * Update cursor state *after* programming all the planes.
7357          * This avoids redundant programming when we're disabling planes,
7358          * since the pipes being disabled don't need a cursor update.
7359          */
7360         if (acrtc_state->active_planes)
7361                 amdgpu_dm_commit_cursors(state);
7362
7363 cleanup:
7364         kfree(bundle);
7365 }
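
/*
 * Note (illustrative): the update bundle in amdgpu_dm_commit_planes()
 * holds MAX_SURFACES entries of several dc_* structs, which makes it far
 * too large for the kernel stack; hence the kzalloc()/kfree() pair above
 * instead of a local variable.
 */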
7366
7367 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7368                                    struct drm_atomic_state *state)
7369 {
7370         struct amdgpu_device *adev = drm_to_adev(dev);
7371         struct amdgpu_dm_connector *aconnector;
7372         struct drm_connector *connector;
7373         struct drm_connector_state *old_con_state, *new_con_state;
7374         struct drm_crtc_state *new_crtc_state;
7375         struct dm_crtc_state *new_dm_crtc_state;
7376         const struct dc_stream_status *status;
7377         int i, inst;
7378
7379         /* Notify device removals. */
7380         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7381                 if (old_con_state->crtc != new_con_state->crtc) {
7382                         /* CRTC changes require notification. */
7383                         goto notify;
7384                 }
7385
7386                 if (!new_con_state->crtc)
7387                         continue;
7388
7389                 new_crtc_state = drm_atomic_get_new_crtc_state(
7390                         state, new_con_state->crtc);
7391
7392                 if (!new_crtc_state)
7393                         continue;
7394
7395                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7396                         continue;
7397
7398         notify:
7399                 aconnector = to_amdgpu_dm_connector(connector);
7400
7401                 mutex_lock(&adev->dm.audio_lock);
7402                 inst = aconnector->audio_inst;
7403                 aconnector->audio_inst = -1;
7404                 mutex_unlock(&adev->dm.audio_lock);
7405
7406                 amdgpu_dm_audio_eld_notify(adev, inst);
7407         }
7408
7409         /* Notify audio device additions. */
7410         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7411                 if (!new_con_state->crtc)
7412                         continue;
7413
7414                 new_crtc_state = drm_atomic_get_new_crtc_state(
7415                         state, new_con_state->crtc);
7416
7417                 if (!new_crtc_state)
7418                         continue;
7419
7420                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7421                         continue;
7422
7423                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7424                 if (!new_dm_crtc_state->stream)
7425                         continue;
7426
7427                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7428                 if (!status)
7429                         continue;
7430
7431                 aconnector = to_amdgpu_dm_connector(connector);
7432
7433                 mutex_lock(&adev->dm.audio_lock);
7434                 inst = status->audio_inst;
7435                 aconnector->audio_inst = inst;
7436                 mutex_unlock(&adev->dm.audio_lock);
7437
7438                 amdgpu_dm_audio_eld_notify(adev, inst);
7439         }
7440 }
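
/*
 * Illustrative sketch (not part of the driver): on the receiving end,
 * amdgpu_dm_audio_eld_notify() is expected to forward the instance to
 * the bound audio component, roughly:
 *
 *   if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
 *           acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
 *                                            pin, -1);
 *
 * so the HDA driver re-reads the ELD for that pin.
 */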
7441
7442 /**
7443  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7444  * @crtc_state: the DRM CRTC state
7445  * @stream_state: the DC stream state
7446  *
7447  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7448  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7449  */
7450 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7451                                                 struct dc_stream_state *stream_state)
7452 {
7453         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7454 }
7455
7456 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7457                                    struct drm_atomic_state *state,
7458                                    bool nonblock)
7459 {
7460         /*
7461          * Add a check here for SoCs that support a hardware cursor plane,
7462          * to unset legacy_cursor_update.
7463          */
7464
7465         return drm_atomic_helper_commit(dev, state, nonblock);
7466
7467         /* TODO: Handle EINTR, reenable IRQ */
7468 }
7469
7470 /**
7471  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit-tail implementation.
7472  * @state: The atomic state to commit
7473  *
7474  * This will tell DC to commit the constructed DC state from atomic_check,
7475  * programming the hardware. Any failure here implies a hardware failure, since
7476  * atomic check should have filtered out anything non-kosher.
7477  */
7478 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7479 {
7480         struct drm_device *dev = state->dev;
7481         struct amdgpu_device *adev = drm_to_adev(dev);
7482         struct amdgpu_display_manager *dm = &adev->dm;
7483         struct dm_atomic_state *dm_state;
7484         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7485         uint32_t i, j;
7486         struct drm_crtc *crtc;
7487         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7488         unsigned long flags;
7489         bool wait_for_vblank = true;
7490         struct drm_connector *connector;
7491         struct drm_connector_state *old_con_state, *new_con_state;
7492         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7493         int crtc_disable_count = 0;
7494         bool mode_set_reset_required = false;
7495
7496         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7497         drm_atomic_helper_calc_timestamping_constants(state);
7498
7499         dm_state = dm_atomic_get_new_state(state);
7500         if (dm_state && dm_state->context) {
7501                 dc_state = dm_state->context;
7502         } else {
7503                 /* No state changes, retain current state. */
7504                 dc_state_temp = dc_create_state(dm->dc);
7505                 ASSERT(dc_state_temp);
7506                 dc_state = dc_state_temp;
7507                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7508         }
7509
7510         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7511                                        new_crtc_state, i) {
7512                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7513
7514                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7515
7516                 if (old_crtc_state->active &&
7517                     (!new_crtc_state->active ||
7518                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7519                         manage_dm_interrupts(adev, acrtc, false);
7520                         dc_stream_release(dm_old_crtc_state->stream);
7521                 }
7522         }
7523
7524         /* update changed items */
7525         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7526                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7527
7528                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7529                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7530
7531                 DRM_DEBUG_DRIVER(
7532                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7533                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7534                         "connectors_changed:%d\n",
7535                         acrtc->crtc_id,
7536                         new_crtc_state->enable,
7537                         new_crtc_state->active,
7538                         new_crtc_state->planes_changed,
7539                         new_crtc_state->mode_changed,
7540                         new_crtc_state->active_changed,
7541                         new_crtc_state->connectors_changed);
7542
7543                 /* Copy all transient state flags into dc state */
7544                 if (dm_new_crtc_state->stream) {
7545                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7546                                                             dm_new_crtc_state->stream);
7547                 }
7548
7549                 /*
7550                  * Handle the headless hotplug case, updating new_state
7551                  * and aconnector as needed.
7552                  */
7553                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7554
7555                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7556
7557                         if (!dm_new_crtc_state->stream) {
7558                                 /*
7559                                  * This can happen because of problems with
7560                                  * userspace notification delivery:
7561                                  * userspace tries to set a mode on a display
7562                                  * that is in fact already disconnected, so
7563                                  * dc_sink is NULL on the aconnector and we
7564                                  * expect a mode reset to come soon.
7565                                  *
7566                                  * It can also happen when an unplug occurs
7567                                  * while the resume sequence is still running.
7568                                  *
7569                                  * In either case, we want to pretend we still
7570                                  * have a sink to keep the pipe running, so that
7571                                  * hw state stays consistent with the sw state.
7572                                  */
7573                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7574                                                 __func__, acrtc->base.base.id);
7575                                 continue;
7576                         }
7577
7578                         if (dm_old_crtc_state->stream)
7579                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7580
7581                         pm_runtime_get_noresume(dev->dev);
7582
7583                         acrtc->enabled = true;
7584                         acrtc->hw_mode = new_crtc_state->mode;
7585                         crtc->hwmode = new_crtc_state->mode;
7586                         mode_set_reset_required = true;
7587                 } else if (modereset_required(new_crtc_state)) {
7588                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7589                         /* i.e. reset mode */
7590                         if (dm_old_crtc_state->stream)
7591                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7592                         mode_set_reset_required = true;
7593                 }
7594         } /* for_each_crtc_in_state() */
7595
7596         if (dc_state) {
7597                 /* If there was a mode set or reset, disable eDP PSR */
7598                 if (mode_set_reset_required)
7599                         amdgpu_dm_psr_disable_all(dm);
7600
7601                 dm_enable_per_frame_crtc_master_sync(dc_state);
7602                 mutex_lock(&dm->dc_lock);
7603                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7604                 mutex_unlock(&dm->dc_lock);
7605         }
7606
7607         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7608                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7609
7610                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7611
7612                 if (dm_new_crtc_state->stream != NULL) {
7613                         const struct dc_stream_status *status =
7614                                         dc_stream_get_status(dm_new_crtc_state->stream);
7615
7616                         if (!status)
7617                                 status = dc_stream_get_status_from_state(dc_state,
7618                                                                          dm_new_crtc_state->stream);
7619                         if (!status)
7620                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7621                         else
7622                                 acrtc->otg_inst = status->primary_otg_inst;
7623                 }
7624         }
7625 #ifdef CONFIG_DRM_AMD_DC_HDCP
7626         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7627                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7628                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7629                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7630
7631                 new_crtc_state = NULL;
7632
7633                 if (acrtc)
7634                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7635
7636                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7637
7638                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7639                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7640                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7641                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7642                         continue;
7643                 }
7644
7645                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7646                         hdcp_update_display(
7647                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7648                                 new_con_state->hdcp_content_type,
7649                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7650                                                                                                          : false);
7651         }
7652 #endif
7653
7654         /* Handle connector state changes */
7655         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7656                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7657                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7658                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7659                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7660                 struct dc_stream_update stream_update;
7661                 struct dc_info_packet hdr_packet;
7662                 struct dc_stream_status *status = NULL;
7663                 bool abm_changed, hdr_changed, scaling_changed;
7664
7665                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7666                 memset(&stream_update, 0, sizeof(stream_update));
7667
7668                 if (acrtc) {
7669                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7670                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7671                 }
7672
7673                 /* Skip any modesets/resets */
7674                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7675                         continue;
7676
7677                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7678                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7679
7680                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7681                                                              dm_old_con_state);
7682
7683                 abm_changed = dm_new_crtc_state->abm_level !=
7684                               dm_old_crtc_state->abm_level;
7685
7686                 hdr_changed =
7687                         is_hdr_metadata_different(old_con_state, new_con_state);
7688
7689                 if (!scaling_changed && !abm_changed && !hdr_changed)
7690                         continue;
7691
7692                 stream_update.stream = dm_new_crtc_state->stream;
7693                 if (scaling_changed) {
7694                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7695                                         dm_new_con_state, dm_new_crtc_state->stream);
7696
7697                         stream_update.src = dm_new_crtc_state->stream->src;
7698                         stream_update.dst = dm_new_crtc_state->stream->dst;
7699                 }
7700
7701                 if (abm_changed) {
7702                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7703
7704                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7705                 }
7706
7707                 if (hdr_changed) {
7708                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7709                         stream_update.hdr_static_metadata = &hdr_packet;
7710                 }
7711
7712                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7713                 if (WARN_ON(!status || !status->plane_count))
7714                         continue;
7715
7716                 /*
7717                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7718                  * Here we create an empty update on each plane.
7719                  * To fix this, DC should permit updating only stream properties.
7720                  */
7721                 for (j = 0; j < status->plane_count; j++)
7722                         dummy_updates[j].surface = status->plane_states[0];
7723
7724
7725                 mutex_lock(&dm->dc_lock);
7726                 dc_commit_updates_for_stream(dm->dc,
7727                                                      dummy_updates,
7728                                                      status->plane_count,
7729                                                      dm_new_crtc_state->stream,
7730                                                      &stream_update,
7731                                                      dc_state);
7732                 mutex_unlock(&dm->dc_lock);
7733         }
7734
7735         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7736         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7737                                       new_crtc_state, i) {
7738                 if (old_crtc_state->active && !new_crtc_state->active)
7739                         crtc_disable_count++;
7740
7741                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7742                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7743
7744                 /* For freesync config update on crtc state and params for irq */
7745                 update_stream_irq_parameters(dm, dm_new_crtc_state);
7746
7747                 /* Handle vrr on->off / off->on transitions */
7748                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7749                                                 dm_new_crtc_state);
7750         }
7751
7752         /*
7753          * Enable interrupts for CRTCs that are newly enabled or went through
7754          * a modeset. This is intentionally deferred until after the front-end
7755          * state has been modified, so that the OTG is on and the IRQ
7756          * handlers don't access stale or invalid state.
7757          */
7758         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7759                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7760
7761                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7762
7763                 if (new_crtc_state->active &&
7764                     (!old_crtc_state->active ||
7765                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7766                         dc_stream_retain(dm_new_crtc_state->stream);
7767                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7768                         manage_dm_interrupts(adev, acrtc, true);
7769
7770 #ifdef CONFIG_DEBUG_FS
7771                         /*
7772                          * The frontend may have changed, so reapply the CRC
7773                          * capture settings for the stream.
7774                          */
7775                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7776
7777                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7778                                 amdgpu_dm_crtc_configure_crc_source(
7779                                         crtc, dm_new_crtc_state,
7780                                         dm_new_crtc_state->crc_src);
7781                         }
7782 #endif
7783                 }
7784         }
7785
7786         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7787                 if (new_crtc_state->async_flip)
7788                         wait_for_vblank = false;
7789
7790         /* update planes when needed per crtc*/
7791         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7792                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7793
7794                 if (dm_new_crtc_state->stream)
7795                         amdgpu_dm_commit_planes(state, dc_state, dev,
7796                                                 dm, crtc, wait_for_vblank);
7797         }
7798
7799         /* Update audio instances for each connector. */
7800         amdgpu_dm_commit_audio(dev, state);
7801
7802         /*
7803          * Send a vblank event for every event not handled in the flip path,
7804          * and mark each event consumed for drm_atomic_helper_commit_hw_done().
7805          */
7806         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7807         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7808
7809                 if (new_crtc_state->event)
7810                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7811
7812                 new_crtc_state->event = NULL;
7813         }
7814         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7815
7816         /* Signal HW programming completion */
7817         drm_atomic_helper_commit_hw_done(state);
7818
7819         if (wait_for_vblank)
7820                 drm_atomic_helper_wait_for_flip_done(dev, state);
7821
7822         drm_atomic_helper_cleanup_planes(dev, state);
7823
7824         /*
7825          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7826          * so we can put the GPU into runtime suspend if we're not driving any
7827          * displays anymore
7828          */
7829         for (i = 0; i < crtc_disable_count; i++)
7830                 pm_runtime_put_autosuspend(dev->dev);
7831         pm_runtime_mark_last_busy(dev->dev);
7832
7833         if (dc_state_temp)
7834                 dc_release_state(dc_state_temp);
7835 }
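
/*
 * Recap of commit-tail ordering above (illustrative):
 *
 *   1. Disable interrupts and release streams for CRTCs being shut down
 *      or modeset.
 *   2. dc_commit_state()            - program the new global DC state.
 *   3. Per-connector stream updates - scaling / ABM / HDR metadata.
 *   4. Re-enable interrupts, commit planes and cursors, notify audio.
 *   5. drm_atomic_helper_commit_hw_done(), wait for flips, cleanup and
 *      drop runtime-PM references for disabled CRTCs.
 */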
7836
7837
7838 static int dm_force_atomic_commit(struct drm_connector *connector)
7839 {
7840         int ret = 0;
7841         struct drm_device *ddev = connector->dev;
7842         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7843         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7844         struct drm_plane *plane = disconnected_acrtc->base.primary;
7845         struct drm_connector_state *conn_state;
7846         struct drm_crtc_state *crtc_state;
7847         struct drm_plane_state *plane_state;
7848
7849         if (!state)
7850                 return -ENOMEM;
7851
7852         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7853
7854         /* Construct an atomic state to restore the previous display setting */
7855
7856         /*
7857          * Attach connectors to drm_atomic_state
7858          */
7859         conn_state = drm_atomic_get_connector_state(state, connector);
7860
7861         ret = PTR_ERR_OR_ZERO(conn_state);
7862         if (ret)
7863                 goto err;
7864
7865         /* Attach CRTC to drm_atomic_state */
7866         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7867
7868         ret = PTR_ERR_OR_ZERO(crtc_state);
7869         if (ret)
7870                 goto err;
7871
7872         /* force a restore */
7873         crtc_state->mode_changed = true;
7874
7875         /* Attach plane to drm_atomic_state */
7876         plane_state = drm_atomic_get_plane_state(state, plane);
7877
7878         ret = PTR_ERR_OR_ZERO(plane_state);
7879         if (ret)
7880                 goto err;
7881
7882
7883         /* Call commit internally with the state we just constructed */
7884         ret = drm_atomic_commit(state);
7885
7886 err:
7887         if (ret)
7888                 DRM_ERROR("Restoring old state failed with %i\n", ret);
7889
7890         /* Drop the state reference on both the success and error paths */
7891         drm_atomic_state_put(state);
7892         return ret;
7893 }
7894
7895 /*
7896  * This function handles all cases when set mode does not come upon hotplug.
7897  * This includes when a display is unplugged then plugged back into the
7898  * same port and when running without usermode desktop manager supprot
7899  */
7900 void dm_restore_drm_connector_state(struct drm_device *dev,
7901                                     struct drm_connector *connector)
7902 {
7903         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7904         struct amdgpu_crtc *disconnected_acrtc;
7905         struct dm_crtc_state *acrtc_state;
7906
7907         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7908                 return;
7909
7910         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7911         if (!disconnected_acrtc)
7912                 return;
7913
7914         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7915         if (!acrtc_state->stream)
7916                 return;
7917
7918         /*
7919          * If the previous sink is not released and different from the current,
7920          * we deduce we are in a state where we can not rely on usermode call
7921          * to turn on the display, so we do it here
7922          */
7923         if (acrtc_state->stream->sink != aconnector->dc_sink)
7924                 dm_force_atomic_commit(&aconnector->base);
7925 }
7926
7927 /*
7928  * Grabs all modesetting locks to serialize against any blocking commits,
7929  * Waits for completion of all non blocking commits.
7930  */
7931 static int do_aquire_global_lock(struct drm_device *dev,
7932                                  struct drm_atomic_state *state)
7933 {
7934         struct drm_crtc *crtc;
7935         struct drm_crtc_commit *commit;
7936         long ret;
7937
7938         /*
7939          * Adding all modeset locks to aquire_ctx will
7940          * ensure that when the framework release it the
7941          * extra locks we are locking here will get released to
7942          */
7943         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7944         if (ret)
7945                 return ret;
7946
7947         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7948                 spin_lock(&crtc->commit_lock);
7949                 commit = list_first_entry_or_null(&crtc->commit_list,
7950                                 struct drm_crtc_commit, commit_entry);
7951                 if (commit)
7952                         drm_crtc_commit_get(commit);
7953                 spin_unlock(&crtc->commit_lock);
7954
7955                 if (!commit)
7956                         continue;
7957
7958                 /*
7959                  * Make sure all pending HW programming completed and
7960                  * page flips done
7961                  */
7962                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7963
7964                 if (ret > 0)
7965                         ret = wait_for_completion_interruptible_timeout(
7966                                         &commit->flip_done, 10*HZ);
7967
7968                 if (ret == 0)
7969                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
7970                                   crtc->base.id, crtc->name);
7971
7972                 drm_crtc_commit_put(commit);
7973         }
7974
7975         return ret < 0 ? ret : 0;
7976 }
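
/*
 * Note (illustrative): drm_crtc_commit_get()/drm_crtc_commit_put() above
 * pin the commit object so it cannot be freed while this function sleeps
 * in wait_for_completion_interruptible_timeout(); the 10*HZ timeout
 * bounds each wait to roughly ten seconds.
 */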
7977
7978 static void get_freesync_config_for_crtc(
7979         struct dm_crtc_state *new_crtc_state,
7980         struct dm_connector_state *new_con_state)
7981 {
7982         struct mod_freesync_config config = {0};
7983         struct amdgpu_dm_connector *aconnector =
7984                         to_amdgpu_dm_connector(new_con_state->base.connector);
7985         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7986         int vrefresh = drm_mode_vrefresh(mode);
7987
7988         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7989                                         vrefresh >= aconnector->min_vfreq &&
7990                                         vrefresh <= aconnector->max_vfreq;
7991
7992         if (new_crtc_state->vrr_supported) {
7993                 new_crtc_state->stream->ignore_msa_timing_param = true;
7994                 config.state = new_crtc_state->base.vrr_enabled ?
7995                                 VRR_STATE_ACTIVE_VARIABLE :
7996                                 VRR_STATE_INACTIVE;
7997                 config.min_refresh_in_uhz =
7998                                 aconnector->min_vfreq * 1000000;
7999                 config.max_refresh_in_uhz =
8000                                 aconnector->max_vfreq * 1000000;
8001                 config.vsif_supported = true;
8002                 config.btr = true;
8003         }
8004
8005         new_crtc_state->freesync_config = config;
8006 }
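
/*
 * Worked example (illustrative): a FreeSync panel reporting a 48-144 Hz
 * range yields
 *
 *   config.min_refresh_in_uhz = 48 * 1000000;   //  48,000,000 uHz
 *   config.max_refresh_in_uhz = 144 * 1000000;  // 144,000,000 uHz
 *
 * and a 60 Hz mode on it is flagged vrr_supported since 48 <= 60 <= 144.
 */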
8007
8008 static void reset_freesync_config_for_crtc(
8009         struct dm_crtc_state *new_crtc_state)
8010 {
8011         new_crtc_state->vrr_supported = false;
8012
8013         memset(&new_crtc_state->vrr_infopacket, 0,
8014                sizeof(new_crtc_state->vrr_infopacket));
8015 }
8016
8017 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8018                                 struct drm_atomic_state *state,
8019                                 struct drm_crtc *crtc,
8020                                 struct drm_crtc_state *old_crtc_state,
8021                                 struct drm_crtc_state *new_crtc_state,
8022                                 bool enable,
8023                                 bool *lock_and_validation_needed)
8024 {
8025         struct dm_atomic_state *dm_state = NULL;
8026         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8027         struct dc_stream_state *new_stream;
8028         int ret = 0;
8029
8030         /*
8031          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8032          * update changed items
8033          */
8034         struct amdgpu_crtc *acrtc = NULL;
8035         struct amdgpu_dm_connector *aconnector = NULL;
8036         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8037         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8038
8039         new_stream = NULL;
8040
8041         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8042         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8043         acrtc = to_amdgpu_crtc(crtc);
8044         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8045
8046         /* TODO This hack should go away */
8047         if (aconnector && enable) {
8048                 /* Make sure fake sink is created in plug-in scenario */
8049                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8050                                                             &aconnector->base);
8051                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8052                                                             &aconnector->base);
8053
8054                 if (IS_ERR(drm_new_conn_state)) {
8055                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8056                         goto fail;
8057                 }
8058
8059                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8060                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8061
8062                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8063                         goto skip_modeset;
8064
8065                 new_stream = create_validate_stream_for_sink(aconnector,
8066                                                              &new_crtc_state->mode,
8067                                                              dm_new_conn_state,
8068                                                              dm_old_crtc_state->stream);
8069
8070                 /*
8071                  * We can have no stream on ACTION_SET if a display
8072                  * was disconnected during S3; in this case it is not an
8073                  * error: the OS will be updated after detection and
8074                  * will do the right thing on the next atomic commit.
8075                  */
8076
8077                 if (!new_stream) {
8078                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8079                                         __func__, acrtc->base.base.id);
8080                         ret = -ENOMEM;
8081                         goto fail;
8082                 }
8083
8084                 /*
8085                  * TODO: Check VSDB bits to decide whether this should
8086                  * be enabled or not.
8087                  */
8088                 new_stream->triggered_crtc_reset.enabled =
8089                         dm->force_timing_sync;
8090
8091                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8092
8093                 ret = fill_hdr_info_packet(drm_new_conn_state,
8094                                            &new_stream->hdr_static_metadata);
8095                 if (ret)
8096                         goto fail;
8097
8098                 /*
8099                  * If we already removed the old stream from the context
8100                  * (and set the new stream to NULL) then we can't reuse
8101                  * the old stream even if the stream and scaling are unchanged.
8102                  * We'll hit the BUG_ON and black screen.
8103                  *
8104                  * TODO: Refactor this function to allow this check to work
8105                  * in all conditions.
8106                  */
8107                 if (dm_new_crtc_state->stream &&
8108                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8109                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8110                         new_crtc_state->mode_changed = false;
8111                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8112                                          new_crtc_state->mode_changed);
8113                 }
8114         }
8115
8116         /* mode_changed flag may get updated above, need to check again */
8117         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8118                 goto skip_modeset;
8119
8120         DRM_DEBUG_DRIVER(
8121                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8122                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8123                 "connectors_changed:%d\n",
8124                 acrtc->crtc_id,
8125                 new_crtc_state->enable,
8126                 new_crtc_state->active,
8127                 new_crtc_state->planes_changed,
8128                 new_crtc_state->mode_changed,
8129                 new_crtc_state->active_changed,
8130                 new_crtc_state->connectors_changed);
8131
8132         /* Remove stream for any changed/disabled CRTC */
8133         if (!enable) {
8134
8135                 if (!dm_old_crtc_state->stream)
8136                         goto skip_modeset;
8137
8138                 ret = dm_atomic_get_state(state, &dm_state);
8139                 if (ret)
8140                         goto fail;
8141
8142                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8143                                 crtc->base.id);
8144
8145                 /* i.e. reset mode */
8146                 if (dc_remove_stream_from_ctx(
8147                                 dm->dc,
8148                                 dm_state->context,
8149                                 dm_old_crtc_state->stream) != DC_OK) {
8150                         ret = -EINVAL;
8151                         goto fail;
8152                 }
8153
8154                 dc_stream_release(dm_old_crtc_state->stream);
8155                 dm_new_crtc_state->stream = NULL;
8156
8157                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8158
8159                 *lock_and_validation_needed = true;
8160
8161         } else {/* Add stream for any updated/enabled CRTC */
8162                 /*
8163                  * Quick fix to prevent a NULL pointer dereference on new_stream when
8164                  * added MST connectors are not found in the existing crtc_state in chained mode.
8165                  * TODO: dig out the root cause of this.
8166                  */
8167                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8168                         goto skip_modeset;
8169
8170                 if (modereset_required(new_crtc_state))
8171                         goto skip_modeset;
8172
8173                 if (modeset_required(new_crtc_state, new_stream,
8174                                      dm_old_crtc_state->stream)) {
8175
8176                         WARN_ON(dm_new_crtc_state->stream);
8177
8178                         ret = dm_atomic_get_state(state, &dm_state);
8179                         if (ret)
8180                                 goto fail;
8181
8182                         dm_new_crtc_state->stream = new_stream;
8183
8184                         dc_stream_retain(new_stream);
8185
8186                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8187                                                 crtc->base.id);
8188
8189                         if (dc_add_stream_to_ctx(
8190                                         dm->dc,
8191                                         dm_state->context,
8192                                         dm_new_crtc_state->stream) != DC_OK) {
8193                                 ret = -EINVAL;
8194                                 goto fail;
8195                         }
8196
8197                         *lock_and_validation_needed = true;
8198                 }
8199         }
8200
8201 skip_modeset:
8202         /* Release extra reference */
8203         if (new_stream)
8204                 dc_stream_release(new_stream);
8205
8206         /*
8207          * We want to do dc stream updates that do not require a
8208          * full modeset below.
8209          */
8210         if (!(enable && aconnector && new_crtc_state->active))
8211                 return 0;
8212         /*
8213          * Given the above conditions, the dc state cannot be NULL because:
8214          * 1. we're in the process of enabling CRTCs (the stream has just been
8215          *    added to the dc context, or is already in the context),
8216          * 2. the CRTC has a valid connector attached, and
8217          * 3. the CRTC is currently active and enabled.
8218          * => The dc stream state currently exists.
8219          */
8220         BUG_ON(dm_new_crtc_state->stream == NULL);
8221
8222         /* Scaling or underscan settings */
8223         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8224                 update_stream_scaling_settings(
8225                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8226
8227         /* ABM settings */
8228         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8229
8230         /*
8231          * Color management settings. We also update color properties
8232          * when a modeset is needed, to ensure it gets reprogrammed.
8233          */
8234         if (dm_new_crtc_state->base.color_mgmt_changed ||
8235             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8236                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8237                 if (ret)
8238                         goto fail;
8239         }
8240
8241         /* Update Freesync settings. */
8242         get_freesync_config_for_crtc(dm_new_crtc_state,
8243                                      dm_new_conn_state);
8244
8245         return ret;
8246
8247 fail:
8248         if (new_stream)
8249                 dc_stream_release(new_stream);
8250         return ret;
8251 }
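
/*
 * Call pattern (a sketch of how amdgpu_dm_atomic_check() below uses
 * this helper; old_s/new_s abbreviate the old/new CRTC states):
 * streams are removed in a first pass and (re)added in a second,
 * presumably so DC releases pipe resources before they are
 * re-acquired:
 *
 *   for_each_oldnew_crtc_in_state(state, crtc, old_s, new_s, i)
 *           dm_update_crtc_state(&adev->dm, state, crtc, old_s, new_s,
 *                                false, &lock_and_validation_needed);
 *
 *   for_each_oldnew_crtc_in_state(state, crtc, old_s, new_s, i)
 *           dm_update_crtc_state(&adev->dm, state, crtc, old_s, new_s,
 *                                true, &lock_and_validation_needed);
 */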
8252
8253 static bool should_reset_plane(struct drm_atomic_state *state,
8254                                struct drm_plane *plane,
8255                                struct drm_plane_state *old_plane_state,
8256                                struct drm_plane_state *new_plane_state)
8257 {
8258         struct drm_plane *other;
8259         struct drm_plane_state *old_other_state, *new_other_state;
8260         struct drm_crtc_state *new_crtc_state;
8261         int i;
8262
8263         /*
8264          * TODO: Remove this hack once the checks below are sufficient
8265          * to determine when we need to reset all the planes on
8266          * the stream.
8267          */
8268         if (state->allow_modeset)
8269                 return true;
8270
8271         /* Exit early if we know that we're adding or removing the plane. */
8272         if (old_plane_state->crtc != new_plane_state->crtc)
8273                 return true;
8274
8275         /* old crtc == new_crtc == NULL, plane not in context. */
8276         if (!new_plane_state->crtc)
8277                 return false;
8278
8279         new_crtc_state =
8280                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8281
8282         if (!new_crtc_state)
8283                 return true;
8284
8285         /* CRTC Degamma changes currently require us to recreate planes. */
8286         if (new_crtc_state->color_mgmt_changed)
8287                 return true;
8288
8289         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8290                 return true;
8291
8292         /*
8293          * If there are any new primary or overlay planes being added or
8294          * removed then the z-order can potentially change. To ensure
8295          * correct z-order and pipe acquisition the current DC architecture
8296          * requires us to remove and recreate all existing planes.
8297          *
8298          * TODO: Come up with a more elegant solution for this.
8299          */
8300         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8301                 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8302
8303                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8304                         continue;
8305
8306                 if (old_other_state->crtc != new_plane_state->crtc &&
8307                     new_other_state->crtc != new_plane_state->crtc)
8308                         continue;
8309
8310                 if (old_other_state->crtc != new_other_state->crtc)
8311                         return true;
8312
8313                 /* Src/dst size and scaling updates. */
8314                 if (old_other_state->src_w != new_other_state->src_w ||
8315                     old_other_state->src_h != new_other_state->src_h ||
8316                     old_other_state->crtc_w != new_other_state->crtc_w ||
8317                     old_other_state->crtc_h != new_other_state->crtc_h)
8318                         return true;
8319
8320                 /* Rotation / mirroring updates. */
8321                 if (old_other_state->rotation != new_other_state->rotation)
8322                         return true;
8323
8324                 /* Blending updates. */
8325                 if (old_other_state->pixel_blend_mode !=
8326                     new_other_state->pixel_blend_mode)
8327                         return true;
8328
8329                 /* Alpha updates. */
8330                 if (old_other_state->alpha != new_other_state->alpha)
8331                         return true;
8332
8333                 /* Colorspace changes. */
8334                 if (old_other_state->color_range != new_other_state->color_range ||
8335                     old_other_state->color_encoding != new_other_state->color_encoding)
8336                         return true;
8337
8338                 /* Framebuffer checks fall at the end. */
8339                 if (!old_other_state->fb || !new_other_state->fb)
8340                         continue;
8341
8342                 /* Pixel format changes can require bandwidth updates. */
8343                 if (old_other_state->fb->format != new_other_state->fb->format)
8344                         return true;
8345
8346                 old_dm_plane_state = to_dm_plane_state(old_other_state);
8347                 new_dm_plane_state = to_dm_plane_state(new_other_state);
8348
8349                 /* Tiling and DCC changes also require bandwidth updates. */
8350                 if (old_dm_plane_state->tiling_flags !=
8351                     new_dm_plane_state->tiling_flags)
8352                         return true;
8353         }
8354
8355         return false;
8356 }
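
/*
 * Illustrative outcomes of the checks above, assuming no modeset in
 * the state:
 *
 *   same crtc, only the fb swapped, same format and size
 *           -> false (plane survives as a fast update)
 *   plane moved to another crtc, or src/dest size, rotation, alpha,
 *   blend mode, color range/encoding, fb format or tiling changed
 *           -> true (all planes on the stream get reset)
 */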
8357
8358 static int dm_update_plane_state(struct dc *dc,
8359                                  struct drm_atomic_state *state,
8360                                  struct drm_plane *plane,
8361                                  struct drm_plane_state *old_plane_state,
8362                                  struct drm_plane_state *new_plane_state,
8363                                  bool enable,
8364                                  bool *lock_and_validation_needed)
8365 {
8366
8367         struct dm_atomic_state *dm_state = NULL;
8368         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8369         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8370         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8371         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8372         struct amdgpu_crtc *new_acrtc;
8373         bool needs_reset;
8374         int ret = 0;
8375
8377         new_plane_crtc = new_plane_state->crtc;
8378         old_plane_crtc = old_plane_state->crtc;
8379         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8380         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8381
8382         /* TODO: Implement a better atomic check for the cursor plane */
8383         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8384                 if (!enable || !new_plane_crtc ||
8385                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8386                         return 0;
8387
8388                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8389
8390                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8391                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8392                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8393                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8394                         return -EINVAL;
8395                 }
8396
8397                 return 0;
8398         }
8399
8400         needs_reset = should_reset_plane(state, plane, old_plane_state,
8401                                          new_plane_state);
8402
8403         /* Remove any changed/removed planes */
8404         if (!enable) {
8405                 if (!needs_reset)
8406                         return 0;
8407
8408                 if (!old_plane_crtc)
8409                         return 0;
8410
8411                 old_crtc_state = drm_atomic_get_old_crtc_state(
8412                                 state, old_plane_crtc);
8413                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8414
8415                 if (!dm_old_crtc_state->stream)
8416                         return 0;
8417
8418                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8419                                 plane->base.id, old_plane_crtc->base.id);
8420
8421                 ret = dm_atomic_get_state(state, &dm_state);
8422                 if (ret)
8423                         return ret;
8424
8425                 if (!dc_remove_plane_from_context(
8426                                 dc,
8427                                 dm_old_crtc_state->stream,
8428                                 dm_old_plane_state->dc_state,
8429                                 dm_state->context)) {
8430                         return -EINVAL;
8431                 }
8432
8435                 dc_plane_state_release(dm_old_plane_state->dc_state);
8436                 dm_new_plane_state->dc_state = NULL;
8437
8438                 *lock_and_validation_needed = true;
8439
8440         } else { /* Add new planes */
8441                 struct dc_plane_state *dc_new_plane_state;
8442
8443                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8444                         return 0;
8445
8446                 if (!new_plane_crtc)
8447                         return 0;
8448
8449                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8450                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8451
8452                 if (!dm_new_crtc_state->stream)
8453                         return 0;
8454
8455                 if (!needs_reset)
8456                         return 0;
8457
8458                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8459                 if (ret)
8460                         return ret;
8461
8462                 WARN_ON(dm_new_plane_state->dc_state);
8463
8464                 dc_new_plane_state = dc_create_plane_state(dc);
8465                 if (!dc_new_plane_state)
8466                         return -ENOMEM;
8467
8468                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8469                                 plane->base.id, new_plane_crtc->base.id);
8470
8471                 ret = fill_dc_plane_attributes(
8472                         drm_to_adev(new_plane_crtc->dev),
8473                         dc_new_plane_state,
8474                         new_plane_state,
8475                         new_crtc_state);
8476                 if (ret) {
8477                         dc_plane_state_release(dc_new_plane_state);
8478                         return ret;
8479                 }
8480
8481                 ret = dm_atomic_get_state(state, &dm_state);
8482                 if (ret) {
8483                         dc_plane_state_release(dc_new_plane_state);
8484                         return ret;
8485                 }
8486
8487                 /*
8488                  * Any atomic check errors that occur after this will
8489                  * not need a release. The plane state will be attached
8490                  * to the stream, and therefore part of the atomic
8491                  * state. It'll be released when the atomic state is
8492                  * cleaned.
8493                  */
8494                 if (!dc_add_plane_to_context(
8495                                 dc,
8496                                 dm_new_crtc_state->stream,
8497                                 dc_new_plane_state,
8498                                 dm_state->context)) {
8499
8500                         dc_plane_state_release(dc_new_plane_state);
8501                         return -EINVAL;
8502                 }
8503
8504                 dm_new_plane_state->dc_state = dc_new_plane_state;
8505
8506                 /* Tell DC to do a full surface update every time there
8507                  * is a plane change. Inefficient, but works for now.
8508                  */
8509                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8510
8511                 *lock_and_validation_needed = true;
8512         }
8513
8515         return ret;
8516 }
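
/*
 * Reference-counting contract of the add path above (a sketch): the
 * plane state returned by dc_create_plane_state() carries one
 * reference owned by this function, so every early error path must
 * release it; once dc_add_plane_to_context() succeeds, the reference
 * is handed to dm_new_plane_state->dc_state and dropped later when
 * the atomic state is cleaned:
 *
 *   dc_new_plane_state = dc_create_plane_state(dc);      // ref held here
 *   if (error before the context add)
 *           dc_plane_state_release(dc_new_plane_state);  // ref dropped
 *   else
 *           dm_new_plane_state->dc_state = dc_new_plane_state;
 */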
8517
8518 #if defined(CONFIG_DRM_AMD_DC_DCN)
8519 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8520 {
8521         struct drm_connector *connector;
8522         struct drm_connector_state *conn_state;
8523         struct amdgpu_dm_connector *aconnector = NULL;
8524         int i;
8525         for_each_new_connector_in_state(state, connector, conn_state, i) {
8526                 if (conn_state->crtc != crtc)
8527                         continue;
8528
8529                 aconnector = to_amdgpu_dm_connector(connector);
8530                 if (!aconnector->port || !aconnector->mst_port)
8531                         aconnector = NULL;
8532                 else
8533                         break;
8534         }
8535
8536         if (!aconnector)
8537                 return 0;
8538
8539         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8540 }
8541 #endif
8542
8543 /**
8544  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8545  * @dev: The DRM device
8546  * @state: The atomic state to commit
8547  *
8548  * Validate that the given atomic state is programmable by DC into hardware.
8549  * This involves constructing a &struct dc_state reflecting the new hardware
8550  * state we wish to commit, then querying DC to see if it is programmable. It's
8551  * important not to modify the existing DC state. Otherwise, atomic_check
8552  * may unexpectedly commit hardware changes.
8553  *
8554  * When validating the DC state, it's important that the right locks are
8555  * acquired. For the full-update case, which removes/adds/updates streams on
8556  * one CRTC while flipping on another CRTC, acquiring the global lock
8557  * guarantees that any such full-update commit will wait for completion of
8558  * any outstanding flip using DRM's synchronization events.
8559  *
8560  * Note that DM adds the affected connectors for all CRTCs in state, even when
8561  * might not seem necessary. This is because DC stream creation requires the
8562  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8563  * be possible but non-trivial - a possible TODO item.
8564  *
8565  * Return: 0 on success, or a negative error code if validation failed.
8566  */
8567 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8568                                   struct drm_atomic_state *state)
8569 {
8570         struct amdgpu_device *adev = drm_to_adev(dev);
8571         struct dm_atomic_state *dm_state = NULL;
8572         struct dc *dc = adev->dm.dc;
8573         struct drm_connector *connector;
8574         struct drm_connector_state *old_con_state, *new_con_state;
8575         struct drm_crtc *crtc;
8576         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8577         struct drm_plane *plane;
8578         struct drm_plane_state *old_plane_state, *new_plane_state;
8579         enum dc_status status;
8580         int ret, i;
8581         bool lock_and_validation_needed = false;
8582
8583         amdgpu_check_debugfs_connector_property_change(adev, state);
8584
8585         ret = drm_atomic_helper_check_modeset(dev, state);
8586         if (ret)
8587                 goto fail;
8588
8589         /* Check connector changes */
8590         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8591                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8592                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8593
8594                 /* Skip connectors that are disabled or already part of a modeset. */
8595                 if (!old_con_state->crtc && !new_con_state->crtc)
8596                         continue;
8597
8598                 if (!new_con_state->crtc)
8599                         continue;
8600
8601                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8602                 if (IS_ERR(new_crtc_state)) {
8603                         ret = PTR_ERR(new_crtc_state);
8604                         goto fail;
8605                 }
8606
8607                 if (dm_old_con_state->abm_level !=
8608                     dm_new_con_state->abm_level)
8609                         new_crtc_state->connectors_changed = true;
8610         }
8611
8612 #if defined(CONFIG_DRM_AMD_DC_DCN)
8613         if (adev->asic_type >= CHIP_NAVI10) {
8614                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8615                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8616                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8617                                 if (ret)
8618                                         goto fail;
8619                         }
8620                 }
8621         }
8622 #endif
8623         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8624                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8625                     !new_crtc_state->color_mgmt_changed &&
8626                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8627                         continue;
8628
8629                 if (!new_crtc_state->enable)
8630                         continue;
8631
8632                 ret = drm_atomic_add_affected_connectors(state, crtc);
8633                 if (ret)
8634                         goto fail;
8635
8636                 ret = drm_atomic_add_affected_planes(state, crtc);
8637                 if (ret)
8638                         goto fail;
8639         }
8640
8641         /*
8642          * Add all primary and overlay planes on the CRTC to the state
8643          * whenever a plane is enabled to maintain correct z-ordering
8644          * and to enable fast surface updates.
8645          */
8646         drm_for_each_crtc(crtc, dev) {
8647                 bool modified = false;
8648
8649                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8650                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8651                                 continue;
8652
8653                         if (new_plane_state->crtc == crtc ||
8654                             old_plane_state->crtc == crtc) {
8655                                 modified = true;
8656                                 break;
8657                         }
8658                 }
8659
8660                 if (!modified)
8661                         continue;
8662
8663                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8664                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8665                                 continue;
8666
8667                         new_plane_state =
8668                                 drm_atomic_get_plane_state(state, plane);
8669
8670                         if (IS_ERR(new_plane_state)) {
8671                                 ret = PTR_ERR(new_plane_state);
8672                                 goto fail;
8673                         }
8674                 }
8675         }
8676
8677         /* Prepass for updating tiling flags on new planes. */
8678         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8679                 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8680                 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8681
8682                 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8683                                   &new_dm_plane_state->tmz_surface);
8684                 if (ret)
8685                         goto fail;
8686         }
8687
8688         /* Remove existing planes if they are modified */
8689         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8690                 ret = dm_update_plane_state(dc, state, plane,
8691                                             old_plane_state,
8692                                             new_plane_state,
8693                                             false,
8694                                             &lock_and_validation_needed);
8695                 if (ret)
8696                         goto fail;
8697         }
8698
8699         /* Disable all CRTCs that need to be disabled */
8700         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8701                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8702                                            old_crtc_state,
8703                                            new_crtc_state,
8704                                            false,
8705                                            &lock_and_validation_needed);
8706                 if (ret)
8707                         goto fail;
8708         }
8709
8710         /* Enable all CRTCs that need to be enabled */
8711         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8712                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8713                                            old_crtc_state,
8714                                            new_crtc_state,
8715                                            true,
8716                                            &lock_and_validation_needed);
8717                 if (ret)
8718                         goto fail;
8719         }
8720
8721         /* Add new/modified planes */
8722         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8723                 ret = dm_update_plane_state(dc, state, plane,
8724                                             old_plane_state,
8725                                             new_plane_state,
8726                                             true,
8727                                             &lock_and_validation_needed);
8728                 if (ret)
8729                         goto fail;
8730         }
8731
8732         /* Run this here since we want to validate the streams we created */
8733         ret = drm_atomic_helper_check_planes(dev, state);
8734         if (ret)
8735                 goto fail;
8736
8737         if (state->legacy_cursor_update) {
8738                 /*
8739                  * This is a fast cursor update coming from the plane update
8740                  * helper, check if it can be done asynchronously for better
8741                  * performance.
8742                  */
8743                 state->async_update =
8744                         !drm_atomic_helper_async_check(dev, state);
8745
8746                 /*
8747                  * Skip the remaining global validation if this is an async
8748                  * update. Cursor updates can be done without affecting
8749                  * state or bandwidth calcs and this avoids the performance
8750                  * penalty of locking the private state object and
8751                  * allocating a new dc_state.
8752                  */
8753                 if (state->async_update)
8754                         return 0;
8755         }
8756
8757         /* Check scaling and underscan changes */
8758         /* TODO: Scaling-change validation was removed due to the inability to
8759          * commit a new stream into the context w/o causing a full reset. Need
8760          * to decide how to handle this.
8761          */
8762         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8763                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8764                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8765                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8766
8767                 /* Skip any modesets/resets */
8768                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8769                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8770                         continue;
8771
8772                 /* Skip anything that is not a scaling or underscan change */
8773                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8774                         continue;
8775
8776                 lock_and_validation_needed = true;
8777         }
8778
8779         /*
8780          * Streams and planes are reset when there are changes that affect
8781          * bandwidth. Anything that affects bandwidth needs to go through
8782          * DC global validation to ensure that the configuration can be applied
8783          * to hardware.
8784          *
8785          * We have to currently stall out here in atomic_check for outstanding
8786          * commits to finish in this case because our IRQ handlers reference
8787          * DRM state directly - we can end up disabling interrupts too early
8788          * if we don't.
8789          *
8790          * TODO: Remove this stall and drop DM state private objects.
8791          */
8792         if (lock_and_validation_needed) {
8793                 ret = dm_atomic_get_state(state, &dm_state);
8794                 if (ret)
8795                         goto fail;
8796
8797                 ret = do_aquire_global_lock(dev, state);
8798                 if (ret)
8799                         goto fail;
8800
8801 #if defined(CONFIG_DRM_AMD_DC_DCN)
8802                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8803                         ret = -EINVAL;
8804                         goto fail;
8805                 }
8806
8807                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8808                 if (ret)
8809                         goto fail;
8808 #endif
8809
8810                 /*
8811                  * Perform validation of MST topology in the state:
8812                  * We need to perform MST atomic check before calling
8813                  * dc_validate_global_state(), or there is a chance
8814                  * to get stuck in an infinite loop and hang eventually.
8815                  */
8816                 ret = drm_dp_mst_atomic_check(state);
8817                 if (ret)
8818                         goto fail;
8819                 status = dc_validate_global_state(dc, dm_state->context, false);
8820                 if (status != DC_OK) {
8821                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
8822                                        dc_status_to_str(status), status);
8823                         ret = -EINVAL;
8824                         goto fail;
8825                 }
8826         } else {
8827                 /*
8828                  * The commit is a fast update. Fast updates shouldn't change
8829                  * the DC context, affect global validation, and can have their
8830                  * commit work done in parallel with other commits not touching
8831                  * the same resource. If we have a new DC context as part of
8832                  * the DM atomic state from validation we need to free it and
8833                  * retain the existing one instead.
8834                  *
8835                  * Furthermore, since the DM atomic state only contains the DC
8836                  * context and can safely be annulled, we can free the state
8837                  * and clear the associated private object now to free
8838                  * some memory and avoid a possible use-after-free later.
8839                  */
8840
8841                 for (i = 0; i < state->num_private_objs; i++) {
8842                         struct drm_private_obj *obj = state->private_objs[i].ptr;
8843
8844                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
8845                                 int j = state->num_private_objs-1;
8846
8847                                 dm_atomic_destroy_state(obj,
8848                                                 state->private_objs[i].state);
8849
8850                                 /* If i is not at the end of the array then the
8851                                  * last element needs to be moved to where i was
8852                                  * before the array can safely be truncated.
8853                                  */
8854                                 if (i != j)
8855                                         state->private_objs[i] =
8856                                                 state->private_objs[j];
8857
8858                                 state->private_objs[j].ptr = NULL;
8859                                 state->private_objs[j].state = NULL;
8860                                 state->private_objs[j].old_state = NULL;
8861                                 state->private_objs[j].new_state = NULL;
8862
8863                                 state->num_private_objs = j;
8864                                 break;
8865                         }
8866                 }
8867         }
8868
8869         /* Store the overall update type for use later in atomic check. */
8870         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8871                 struct dm_crtc_state *dm_new_crtc_state =
8872                         to_dm_crtc_state(new_crtc_state);
8873
8874                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
8875                                                          UPDATE_TYPE_FULL :
8876                                                          UPDATE_TYPE_FAST;
8877         }
8878
8879         /* Must be success */
8880         WARN_ON(ret);
8881         return ret;
8882
8883 fail:
8884         if (ret == -EDEADLK)
8885                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8886         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8887                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8888         else
8889                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8890
8891         return ret;
8892 }
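
/*
 * For reference, this hook pair is plumbed into DRM through the
 * driver's &struct drm_mode_config_funcs (a sketch; the actual table
 * is defined earlier in this file and may carry additional members):
 *
 *   static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *           .fb_create = amdgpu_display_user_framebuffer_create,
 *           .atomic_check = amdgpu_dm_atomic_check,
 *           .atomic_commit = amdgpu_dm_atomic_commit,
 *   };
 */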
8893
8894 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8895                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8896 {
8897         uint8_t dpcd_data;
8898         bool capable = false;
8899
8900         if (amdgpu_dm_connector->dc_link &&
8901                 dm_helpers_dp_read_dpcd(
8902                                 NULL,
8903                                 amdgpu_dm_connector->dc_link,
8904                                 DP_DOWN_STREAM_PORT_COUNT,
8905                                 &dpcd_data,
8906                                 sizeof(dpcd_data))) {
8907                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8908         }
8909
8910         return capable;
8911 }
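
/*
 * Worked example: DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x007 and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte (both from
 * <drm/drm_dp_helper.h>), so a sink returning e.g. dpcd_data == 0x41
 * (one downstream port, MSA timing parameters ignored) is reported as
 * capable, while 0x01 is not.
 */
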
8912 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8913                                         struct edid *edid)
8914 {
8915         int i;
8916         bool edid_check_required;
8917         struct detailed_timing *timing;
8918         struct detailed_non_pixel *data;
8919         struct detailed_data_monitor_range *range;
8920         struct amdgpu_dm_connector *amdgpu_dm_connector =
8921                         to_amdgpu_dm_connector(connector);
8922         struct dm_connector_state *dm_con_state = NULL;
8923
8924         struct drm_device *dev = connector->dev;
8925         struct amdgpu_device *adev = drm_to_adev(dev);
8926         bool freesync_capable = false;
8927
8928         if (!connector->state) {
8929                 DRM_ERROR("%s - Connector has no state\n", __func__);
8930                 goto update;
8931         }
8932
8933         if (!edid) {
8934                 dm_con_state = to_dm_connector_state(connector->state);
8935
8936                 amdgpu_dm_connector->min_vfreq = 0;
8937                 amdgpu_dm_connector->max_vfreq = 0;
8938                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8939
8940                 goto update;
8941         }
8942
8943         dm_con_state = to_dm_connector_state(connector->state);
8944
8945         edid_check_required = false;
8946         if (!amdgpu_dm_connector->dc_sink) {
8947                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8948                 goto update;
8949         }
8950         if (!adev->dm.freesync_module)
8951                 goto update;
8952         /*
8953          * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks only.
8954          */
8955         if (edid) {
8956                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8957                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8958                         edid_check_required = is_dp_capable_without_timing_msa(
8959                                                 adev->dm.dc,
8960                                                 amdgpu_dm_connector);
8961                 }
8962         }
8963         if (edid_check_required && (edid->version > 1 ||
8964            (edid->version == 1 && edid->revision > 1))) {
8965                 for (i = 0; i < 4; i++) {
8966
8967                         timing  = &edid->detailed_timings[i];
8968                         data    = &timing->data.other_data;
8969                         range   = &data->data.range;
8970                         /*
8971                          * Check if monitor has continuous frequency mode
8972                          */
8973                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8974                                 continue;
8975                         /*
8976                          * Check for flag range limits only. If flag == 1 then
8977                          * no additional timing information is provided.
8978                          * Default GTF, GTF secondary curve and CVT are not
8979                          * supported.
8980                          */
8981                         if (range->flags != 1)
8982                                 continue;
8983
8984                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8985                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8986                         amdgpu_dm_connector->pixel_clock_mhz =
8987                                 range->pixel_clock_mhz * 10;
8988                         break;
8989                 }
8990
8991                 if (amdgpu_dm_connector->max_vfreq -
8992                     amdgpu_dm_connector->min_vfreq > 10) {
8994                         freesync_capable = true;
8995                 }
8996         }
8997
8998 update:
8999         if (dm_con_state)
9000                 dm_con_state->freesync_capable = freesync_capable;
9001
9002         if (connector->vrr_capable_property)
9003                 drm_connector_set_vrr_capable_property(connector,
9004                                                        freesync_capable);
9005 }
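
/*
 * Worked example of the EDID path above, for a hypothetical panel: a
 * detailed monitor-range descriptor with flags == 1 carrying
 * min_vfreq = 40 and max_vfreq = 75, and a max pixel clock field of
 * 17 (EDID stores it in units of 10 MHz, hence the * 10 -> 170 MHz),
 * gives a 35 Hz range; since 35 > 10, the connector is marked
 * freesync_capable and its DRM "vrr_capable" property is set.
 */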
9006
9007 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9008 {
9009         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9010
9011         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9012                 return;
9013         if (link->type == dc_connection_none)
9014                 return;
9015         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9016                                         dpcd_data, sizeof(dpcd_data))) {
9017                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9018
9019                 if (dpcd_data[0] == 0) {
9020                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9021                         link->psr_settings.psr_feature_enabled = false;
9022                 } else {
9023                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9024                         link->psr_settings.psr_feature_enabled = true;
9025                 }
9026
9027                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9028         }
9029 }
9030
9031 /*
9032  * amdgpu_dm_link_setup_psr() - configure the PSR link
9033  * @stream: stream state
9034  *
9035  * Return: true on success
9036  */
9037 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9038 {
9039         struct dc_link *link = NULL;
9040         struct psr_config psr_config = {0};
9041         struct psr_context psr_context = {0};
9042         bool ret = false;
9043
9044         if (stream == NULL)
9045                 return false;
9046
9047         link = stream->link;
9048
9049         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9050
9051         if (psr_config.psr_version > 0) {
9052                 psr_config.psr_exit_link_training_required = 0x1;
9053                 psr_config.psr_frame_capture_indication_req = 0;
9054                 psr_config.psr_rfb_setup_time = 0x37;
9055                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9056                 psr_config.allow_smu_optimizations = 0x0;
9057
9058                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9059         }
9060
9061         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9062
9063         return ret;
9064 }
9065
9066 /*
9067  * amdgpu_dm_psr_enable() - enable psr f/w
9068  * @stream: stream state
9069  *
9070  * Return: true on success
9071  */
9072 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9073 {
9074         struct dc_link *link = stream->link;
9075         unsigned int vsync_rate_hz = 0;
9076         struct dc_static_screen_params params = {0};
9077         /* Calculate the number of static frames before generating an
9078          * interrupt to enter PSR.
9079          * Init with a fail-safe of 2 static frames.
9080          */
9081         unsigned int num_frames_static = 2;
9082
9083         DRM_DEBUG_DRIVER("Enabling psr...\n");
9084
9085         vsync_rate_hz = div64_u64(div64_u64((
9086                         stream->timing.pix_clk_100hz * 100),
9087                         stream->timing.v_total),
9088                         stream->timing.h_total);
9089
9090         /*
9091          * Calculate the number of frames such that at least 30 ms of time
9092          * has passed, rounding up.
9093          */
9094         if (vsync_rate_hz != 0) {
9095                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9096                 num_frames_static = (30000 / frame_time_microsec) + 1;
9097         }
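
        /*
         * Worked example: at 60 Hz, frame_time_microsec = 1000000 / 60
         * = 16666, so num_frames_static = 30000 / 16666 + 1 = 2 frames;
         * at 144 Hz it is 30000 / 6944 + 1 = 5 frames of static screen
         * before PSR entry.
         */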
9098
9099         params.triggers.cursor_update = true;
9100         params.triggers.overlay_update = true;
9101         params.triggers.surface_update = true;
9102         params.num_frames = num_frames_static;
9103
9104         dc_stream_set_static_screen_params(link->ctx->dc,
9105                                            &stream, 1,
9106                                            &params);
9107
9108         return dc_link_set_psr_allow_active(link, true, false);
9109 }
9110
9111 /*
9112  * amdgpu_dm_psr_disable() - disable psr f/w
9113  * @stream: stream state
9114  *
9115  * Return: true on success
9116  */
9117 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9118 {
9119
9120         DRM_DEBUG_DRIVER("Disabling psr...\n");
9121
9122         return dc_link_set_psr_allow_active(stream->link, false, true);
9123 }
9124
9125 /*
9126  * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
9127  * if psr is enabled on any stream
9128  *
9129  * Return: true on success
9130  */
9131 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9132 {
9133         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9134         return dc_set_psr_allow_active(dm->dc, false);
9135 }
9136
9137 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9138 {
9139         struct amdgpu_device *adev = drm_to_adev(dev);
9140         struct dc *dc = adev->dm.dc;
9141         int i;
9142
9143         mutex_lock(&adev->dm.dc_lock);
9144         if (dc->current_state) {
9145                 for (i = 0; i < dc->current_state->stream_count; ++i)
9146                         dc->current_state->streams[i]
9147                                 ->triggered_crtc_reset.enabled =
9148                                 adev->dm.force_timing_sync;
9149
9150                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9151                 dc_trigger_sync(dc, dc->current_state);
9152         }
9153         mutex_unlock(&adev->dm.dc_lock);
9154 }
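
/*
 * Usage sketch (illustrative): after toggling the force_timing_sync
 * flag on the device, re-apply it to every stream in the current DC
 * state:
 *
 *   adev->dm.force_timing_sync = true;
 *   amdgpu_dm_trigger_timing_sync(dev);   // dev: adev's drm_device
 */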