/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

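                /*
                 * Pack the scanout position into the legacy register
                 * layout: vertical in the low 16 bits, horizontal in the
                 * high 16 bits. For example, v_position = 0x0100 and
                 * h_position = 0x0200 pack to *position = 0x02000100.
                 */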
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

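        /*
         * Subtracting the base pageflip source (IRQ_TYPE_PFLIP) from
         * irq_src recovers the OTG (output timing generator) instance
         * that raised this interrupt.
         */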
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

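/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, runs after the end of the front-porch to perform the core
 * vblank handling (and BTR processing on pre-DCE12 ASICs) that cannot be
 * done safely from dm_crtc_high_irq() while inside the front-porch.
 */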
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after end of front-porch
                 * in vrr mode, as vblank timestamping only gives valid
                 * results after the front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        acrtc_state = to_dm_crtc_state(acrtc->base.state);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      amdgpu_dm_vrr_active(acrtc_state),
                      acrtc_state->active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!amdgpu_dm_vrr_active(acrtc_state))
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc_state->stream,
                                             &acrtc_state->vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
                                           &acrtc_state->vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

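        /*
         * Size the compressor buffer for the largest listed mode; the
         * factor of 4 below assumes 4 bytes per pixel.
         */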
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

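/**
 * dm_dmub_hw_init() - Copy DMUB firmware into framebuffer memory and
 * initialize the DMUB hardware service
 * @adev: amdgpu device
 *
 * Returns 0 on success (including ASICs without DMUB support), negative
 * errno on failure.
 */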
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

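/**
 * amdgpu_dm_init() - Create the display manager
 * @adev: amdgpu device
 *
 * Creates the DC instance, initializes DMUB, the freesync and color
 * management modules, and the DRM-facing display structures.
 */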
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter =
                        !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against dc being NULL when called from the init error path. */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

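/**
 * dm_dmub_sw_init() - Software-side DMUB setup
 * @adev: amdgpu device
 *
 * Loads and validates the DMUB firmware, creates the DMUB service, and
 * allocates and maps the framebuffer regions it requires.
 */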
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Record the firmware version before it is logged below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        if (adev->dm.dmub_fw) {
                release_firmware(adev->dm.dmub_fw);
                adev->dm.dmub_fw = NULL;
        }

        if (adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
        }

        return 0;
}

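/* Start MST topology management on every MST-capable DC link. */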
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret;

        if (!adev->dm.fw_dmcu)
                return detect_mst_link_for_all_connectors(adev->ddev);

        dmcu = adev->dm.dc->res_pool->dmcu;

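        /* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */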
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction; don't allow below 1%:
         * 0xFFFF * 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}

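/*
 * Suspend or resume the MST topology managers across S3. If a manager
 * fails to resume, MST is torn down on that link and a hotplug event is
 * generated so userspace can re-probe.
 */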
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed;
         * the settings should be passed to smu during boot up and resume
         * from S3.
         * Boot up: dc calculates the dcn watermark clock settings within
         * dc_create and dcn20_resource_construct, then calls the pplib
         * functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also
         * fixed values. dc has implemented a different flow for the Windows
         * driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * Therefore, this function applies to navi10/12/14 but not Renoir.
         */
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                break;
        default:
                return 0;
        }

        mutex_lock(&smu->mutex);

        /* pass data to smu controller */
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
                ret = smu_write_watermarks_table(smu);

                if (ret) {
                        mutex_unlock(&smu->mutex);
                        DRM_ERROR("Failed to update WMTABLE!\n");
                        return ret;
                }
                smu->watermarks_bitmap |= WATERMARKS_LOADED;
        }

        mutex_unlock(&smu->mutex);

        return 0;
}

1489 /**
1490  * dm_hw_init() - Initialize DC device
1491  * @handle: The base driver device containing the amdgpu_dm device.
1492  *
1493  * Initialize the &struct amdgpu_display_manager device. This involves calling
1494  * the initializers of each DM component, then populating the struct with them.
1495  *
1496  * Although the function implies hardware initialization, both hardware and
1497  * software are initialized here. Splitting them out to their relevant init
1498  * hooks is a future TODO item.
1499  *
1500  * Some notable things that are initialized here:
1501  *
1502  * - Display Core, both software and hardware
1503  * - DC modules that we need (freesync and color management)
1504  * - DRM software states
1505  * - Interrupt sources and handlers
1506  * - Vblank support
1507  * - Debug FS entries, if enabled
1508  */
1509 static int dm_hw_init(void *handle)
1510 {
1511         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1512         /* Create DAL display manager */
1513         amdgpu_dm_init(adev);
1514         amdgpu_dm_hpd_init(adev);
1515
1516         return 0;
1517 }
1518
1519 /**
1520  * dm_hw_fini() - Teardown DC device
1521  * @handle: The base driver device containing the amdgpu_dm device.
1522  *
1523  * Teardown components within &struct amdgpu_display_manager that require
1524  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1525  * were loaded. Also flush IRQ workqueues and disable them.
1526  */
1527 static int dm_hw_fini(void *handle)
1528 {
1529         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
1531         amdgpu_dm_hpd_fini(adev);
1532
1533         amdgpu_dm_irq_fini(adev);
1534         amdgpu_dm_fini(adev);
1535         return 0;
1536 }
1537
1538
1539 static int dm_enable_vblank(struct drm_crtc *crtc);
1540 static void dm_disable_vblank(struct drm_crtc *crtc);
1541
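     /*
      * Enable or disable the pageflip and vblank interrupt sources for every
      * CRTC that drives a stream in @state; used to quiesce interrupts around
      * a GPU reset.
      */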
1542 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1543                                  struct dc_state *state, bool enable)
1544 {
1545         enum dc_irq_source irq_source;
1546         struct amdgpu_crtc *acrtc;
1547         int rc = -EBUSY;
1548         int i = 0;
1549
1550         for (i = 0; i < state->stream_count; i++) {
1551                 acrtc = get_crtc_by_otg_inst(
1552                                 adev, state->stream_status[i].primary_otg_inst);
1553
1554                 if (acrtc && state->stream_status[i].plane_count != 0) {
1555                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1556                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1557                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1558                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1559                         if (rc)
1560                                 DRM_WARN("Failed to %s pflip interrupts\n",
1561                                          enable ? "enable" : "disable");
1562
1563                         if (enable) {
1564                                 rc = dm_enable_vblank(&acrtc->base);
1565                                 if (rc)
1566                                         DRM_WARN("Failed to enable vblank interrupts\n");
1567                         } else {
1568                                 dm_disable_vblank(&acrtc->base);
1569                         }
1570
1571                 }
1572         }
1573
1574 }
1575
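     /**
      * amdgpu_dm_commit_zero_streams() - Commit a DC state with no active streams
      * @dc: Display Core control structure
      *
      * Copy the current DC state, strip every plane and stream from the copy,
      * then validate and commit it. Used on suspend during GPU reset so that
      * the cached state can be re-committed on resume.
      */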
1576 enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1577 {
1578         struct dc_state *context = NULL;
1579         enum dc_status res = DC_ERROR_UNEXPECTED;
1580         int i;
1581         struct dc_stream_state *del_streams[MAX_PIPES];
1582         int del_streams_count = 0;
1583
1584         memset(del_streams, 0, sizeof(del_streams));
1585
1586         context = dc_create_state(dc);
1587         if (context == NULL)
1588                 goto context_alloc_fail;
1589
1590         dc_resource_state_copy_construct_current(dc, context);
1591
1592         /* First remove from context all streams */
1593         for (i = 0; i < context->stream_count; i++) {
1594                 struct dc_stream_state *stream = context->streams[i];
1595
1596                 del_streams[del_streams_count++] = stream;
1597         }
1598
1599         /* Remove all planes for removed streams and then remove the streams */
1600         for (i = 0; i < del_streams_count; i++) {
1601                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1602                         res = DC_FAIL_DETACH_SURFACES;
1603                         goto fail;
1604                 }
1605
1606                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1607                 if (res != DC_OK)
1608                         goto fail;
1609         }
1610
1611
1612         res = dc_validate_global_state(dc, context, false);
1613
1614         if (res != DC_OK) {
1615                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1616                 goto fail;
1617         }
1618
1619         res = dc_commit_state(dc, context);
1620
1621 fail:
1622         dc_release_state(context);
1623
1624 context_alloc_fail:
1625         return res;
1626 }
1627
1628 static int dm_suspend(void *handle)
1629 {
1630         struct amdgpu_device *adev = handle;
1631         struct amdgpu_display_manager *dm = &adev->dm;
1632         int ret = 0;
1633
1634         if (adev->in_gpu_reset) {
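                     /*
                      * dc_lock is intentionally held across the GPU reset; it
                      * is released in the in_gpu_reset path of dm_resume().
                      */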
1635                 mutex_lock(&dm->dc_lock);
1636                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1637
1638                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1639
1640                 amdgpu_dm_commit_zero_streams(dm->dc);
1641
1642                 amdgpu_dm_irq_suspend(adev);
1643
1644                 return ret;
1645         }
1646
1647         WARN_ON(adev->dm.cached_state);
1648         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1649
1650         s3_handle_mst(adev->ddev, true);
1651
1652         amdgpu_dm_irq_suspend(adev);
1653
1654
1655         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1656
1657         return 0;
1658 }
1659
1660 static struct amdgpu_dm_connector *
1661 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1662                                              struct drm_crtc *crtc)
1663 {
1664         uint32_t i;
1665         struct drm_connector_state *new_con_state;
1666         struct drm_connector *connector;
1667         struct drm_crtc *crtc_from_state;
1668
1669         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1670                 crtc_from_state = new_con_state->crtc;
1671
1672                 if (crtc_from_state == crtc)
1673                         return to_amdgpu_dm_connector(connector);
1674         }
1675
1676         return NULL;
1677 }
1678
1679 static void emulated_link_detect(struct dc_link *link)
1680 {
1681         struct dc_sink_init_data sink_init_data = { 0 };
1682         struct display_sink_capability sink_caps = { 0 };
1683         enum dc_edid_status edid_status;
1684         struct dc_context *dc_ctx = link->ctx;
1685         struct dc_sink *sink = NULL;
1686         struct dc_sink *prev_sink = NULL;
1687
1688         link->type = dc_connection_none;
1689         prev_sink = link->local_sink;
1690
1691         if (prev_sink != NULL)
1692                 dc_sink_retain(prev_sink);
1693
1694         switch (link->connector_signal) {
1695         case SIGNAL_TYPE_HDMI_TYPE_A: {
1696                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1697                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1698                 break;
1699         }
1700
1701         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1702                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1703                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1704                 break;
1705         }
1706
1707         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1708                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1709                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1710                 break;
1711         }
1712
1713         case SIGNAL_TYPE_LVDS: {
1714                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1715                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1716                 break;
1717         }
1718
1719         case SIGNAL_TYPE_EDP: {
1720                 sink_caps.transaction_type =
1721                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1722                 sink_caps.signal = SIGNAL_TYPE_EDP;
1723                 break;
1724         }
1725
1726         case SIGNAL_TYPE_DISPLAY_PORT: {
1727                 sink_caps.transaction_type =
1728                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1729                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1730                 break;
1731         }
1732
1733         default:
1734                 DC_ERROR("Invalid connector type! signal:%d\n",
1735                         link->connector_signal);
1736                 return;
1737         }
1738
1739         sink_init_data.link = link;
1740         sink_init_data.sink_signal = sink_caps.signal;
1741
1742         sink = dc_sink_create(&sink_init_data);
1743         if (!sink) {
1744                 DC_ERROR("Failed to create sink!\n");
1745                 return;
1746         }
1747
1748         /* dc_sink_create returns a new reference */
1749         link->local_sink = sink;
1750
1751         edid_status = dm_helpers_read_local_edid(
1752                         link->ctx,
1753                         link,
1754                         sink);
1755
1756         if (edid_status != EDID_OK)
1757                 DC_ERROR("Failed to read EDID\n");
1758
1759 }
1760
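     /*
      * Re-commit the cached DC state after a GPU reset: mark every plane of
      * every stream for a full update and push the updates through
      * dc_commit_updates_for_stream().
      */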
1761 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1762                                      struct amdgpu_display_manager *dm)
1763 {
1764         struct {
1765                 struct dc_surface_update surface_updates[MAX_SURFACES];
1766                 struct dc_plane_info plane_infos[MAX_SURFACES];
1767                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1768                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1769                 struct dc_stream_update stream_update;
1770         } *bundle;
1771         int k, m;
1772
1773         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1774
1775         if (!bundle) {
1776                 dm_error("Failed to allocate update bundle\n");
1777                 goto cleanup;
1778         }
1779
1780         for (k = 0; k < dc_state->stream_count; k++) {
1781                 bundle->stream_update.stream = dc_state->streams[k];
1782
1783                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1784                         bundle->surface_updates[m].surface =
1785                                 dc_state->stream_status[k].plane_states[m];
1786                         bundle->surface_updates[m].surface->force_full_update =
1787                                 true;
1788                 }
1789                 dc_commit_updates_for_stream(
1790                         dm->dc, bundle->surface_updates,
1791                         dc_state->stream_status[k].plane_count,
1792                         dc_state->streams[k], &bundle->stream_update, dc_state);
1793         }
1794
1795 cleanup:
1796         kfree(bundle);
1797
1798         return;
1799 }
1800
1801 static int dm_resume(void *handle)
1802 {
1803         struct amdgpu_device *adev = handle;
1804         struct drm_device *ddev = adev->ddev;
1805         struct amdgpu_display_manager *dm = &adev->dm;
1806         struct amdgpu_dm_connector *aconnector;
1807         struct drm_connector *connector;
1808         struct drm_connector_list_iter iter;
1809         struct drm_crtc *crtc;
1810         struct drm_crtc_state *new_crtc_state;
1811         struct dm_crtc_state *dm_new_crtc_state;
1812         struct drm_plane *plane;
1813         struct drm_plane_state *new_plane_state;
1814         struct dm_plane_state *dm_new_plane_state;
1815         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1816         enum dc_connection_type new_connection_type = dc_connection_none;
1817         struct dc_state *dc_state;
1818         int i, r, j;
1819
1820         if (adev->in_gpu_reset) {
1821                 dc_state = dm->cached_dc_state;
1822
1823                 r = dm_dmub_hw_init(adev);
1824                 if (r)
1825                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1826
1827                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1828                 dc_resume(dm->dc);
1829
1830                 amdgpu_dm_irq_resume_early(adev);
1831
1832                 for (i = 0; i < dc_state->stream_count; i++) {
1833                         dc_state->streams[i]->mode_changed = true;
1834                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1835                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1836                                         = 0xffffffff;
1837                         }
1838                 }
1839
1840                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1841
1842                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1843
1844                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1845
1846                 dc_release_state(dm->cached_dc_state);
1847                 dm->cached_dc_state = NULL;
1848
1849                 amdgpu_dm_irq_resume_late(adev);
1850
1851                 mutex_unlock(&dm->dc_lock);
1852
1853                 return 0;
1854         }
1855         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1856         dc_release_state(dm_state->context);
1857         dm_state->context = dc_create_state(dm->dc);
1858         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1859         dc_resource_state_construct(dm->dc, dm_state->context);
1860
1861         /* Before powering on DC we need to re-initialize DMUB. */
1862         r = dm_dmub_hw_init(adev);
1863         if (r)
1864                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1865
1866         /* power on hardware */
1867         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1868
1869         /* program HPD filter */
1870         dc_resume(dm->dc);
1871
1872         /*
1873          * Enable the HPD Rx IRQ early; this must be done before the mode is
1874          * set, since short-pulse interrupts are used for MST.
1875          */
1876         amdgpu_dm_irq_resume_early(adev);
1877
1878         /* On resume we need to rewrite the MSTM control bits to enable MST */
1879         s3_handle_mst(ddev, false);
1880
1881         /* Do detection */
1882         drm_connector_list_iter_begin(ddev, &iter);
1883         drm_for_each_connector_iter(connector, &iter) {
1884                 aconnector = to_amdgpu_dm_connector(connector);
1885
1886                 /*
1887                  * Skip MST connectors that were already created during
1888                  * topology discovery; the MST framework handles them.
1889                  */
1890                 if (aconnector->mst_port)
1891                         continue;
1892
1893                 mutex_lock(&aconnector->hpd_lock);
1894                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1895                         DRM_ERROR("KMS: Failed to detect connector\n");
1896
1897                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1898                         emulated_link_detect(aconnector->dc_link);
1899                 else
1900                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1901
1902                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1903                         aconnector->fake_enable = false;
1904
1905                 if (aconnector->dc_sink)
1906                         dc_sink_release(aconnector->dc_sink);
1907                 aconnector->dc_sink = NULL;
1908                 amdgpu_dm_update_connector_after_detect(aconnector);
1909                 mutex_unlock(&aconnector->hpd_lock);
1910         }
1911         drm_connector_list_iter_end(&iter);
1912
1913         /* Force mode set in atomic commit */
1914         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1915                 new_crtc_state->active_changed = true;
1916
1917         /*
1918          * atomic_check is expected to create the dc states. We need to release
1919          * them here, since they were duplicated as part of the suspend
1920          * procedure.
1921          */
1922         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1923                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1924                 if (dm_new_crtc_state->stream) {
1925                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1926                         dc_stream_release(dm_new_crtc_state->stream);
1927                         dm_new_crtc_state->stream = NULL;
1928                 }
1929         }
1930
1931         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1932                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1933                 if (dm_new_plane_state->dc_state) {
1934                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1935                         dc_plane_state_release(dm_new_plane_state->dc_state);
1936                         dm_new_plane_state->dc_state = NULL;
1937                 }
1938         }
1939
1940         drm_atomic_helper_resume(ddev, dm->cached_state);
1941
1942         dm->cached_state = NULL;
1943
1944         amdgpu_dm_irq_resume_late(adev);
1945
1946         amdgpu_dm_smu_write_watermarks_table(adev);
1947
1948         return 0;
1949 }
1950
1951 /**
1952  * DOC: DM Lifecycle
1953  *
1954  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1955  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1956  * the base driver's device list to be initialized and torn down accordingly.
1957  *
1958  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1959  */
1960
1961 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1962         .name = "dm",
1963         .early_init = dm_early_init,
1964         .late_init = dm_late_init,
1965         .sw_init = dm_sw_init,
1966         .sw_fini = dm_sw_fini,
1967         .hw_init = dm_hw_init,
1968         .hw_fini = dm_hw_fini,
1969         .suspend = dm_suspend,
1970         .resume = dm_resume,
1971         .is_idle = dm_is_idle,
1972         .wait_for_idle = dm_wait_for_idle,
1973         .check_soft_reset = dm_check_soft_reset,
1974         .soft_reset = dm_soft_reset,
1975         .set_clockgating_state = dm_set_clockgating_state,
1976         .set_powergating_state = dm_set_powergating_state,
1977 };
1978
1979 const struct amdgpu_ip_block_version dm_ip_block =
1980 {
1981         .type = AMD_IP_BLOCK_TYPE_DCE,
1982         .major = 1,
1983         .minor = 0,
1984         .rev = 0,
1985         .funcs = &amdgpu_dm_funcs,
1986 };
1987
1988
1989 /**
1990  * DOC: atomic
1991  *
1992  * *WIP*
1993  */
1994
1995 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1996         .fb_create = amdgpu_display_user_framebuffer_create,
1997         .output_poll_changed = drm_fb_helper_output_poll_changed,
1998         .atomic_check = amdgpu_dm_atomic_check,
1999         .atomic_commit = amdgpu_dm_atomic_commit,
2000 };
2001
2002 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2003         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2004 };
2005
2006 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2007 {
2008         u32 max_cll, min_cll, max, min, q, r;
2009         struct amdgpu_dm_backlight_caps *caps;
2010         struct amdgpu_display_manager *dm;
2011         struct drm_connector *conn_base;
2012         struct amdgpu_device *adev;
2013         static const u8 pre_computed_values[] = {
2014                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2015                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2016
2017         if (!aconnector || !aconnector->dc_link)
2018                 return;
2019
2020         conn_base = &aconnector->base;
2021         adev = conn_base->dev->dev_private;
2022         dm = &adev->dm;
2023         caps = &dm->backlight_caps;
2024         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2025         caps->aux_support = false;
2026         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2027         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2028
2029         if (caps->ext_caps->bits.oled == 1 ||
2030             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2031             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2032                 caps->aux_support = true;
2033
2034         /* From the specification (CTA-861-G), the maximum luminance is
2035          * calculated as:
2036          *      Luminance = 50 * 2**(CV/32)
2037          * where CV is a one-byte coded value.
2038          * Evaluating this expression directly would need floating-point
2039          * precision; to avoid that, we exploit the fact that CV is divided
2040          * by a constant. By Euclid's division algorithm, CV can be written
2041          * as CV = 32*q + r. Substituting this into the Luminance expression
2042          * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
2043          * 50*(2**(r/32)). The table was generated with the following
2044          * Ruby one-liner:
2045          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2046          * The results can be verified against the pre_computed_values
2047          * table above.
2048          */
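             /*
              * Worked example (illustrative): max_cll = 200 gives q = 6 and
              * r = 8, so max = (1 << 6) * pre_computed_values[8] = 64 * 59 =
              * 3776, close to the exact 50 * 2**(200/32) ~= 3805.
              */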
2049         q = max_cll >> 5;
2050         r = max_cll % 32;
2051         max = (1 << q) * pre_computed_values[r];
2052
2053         /* min luminance: maxLum * (CV/255)^2 / 100; divide last so the
2054          * intermediate integer math does not truncate to zero. */
2055         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2056
2057         caps->aux_max_input_signal = max;
2058         caps->aux_min_input_signal = min;
2059 }
2060
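     /**
      * amdgpu_dm_update_connector_after_detect() - Sync connector state after detection
      * @aconnector: Connector that was (re)detected
      *
      * Propagate the result of a link detection to the DRM connector: update
      * the dc_sink reference, the cached EDID, the freesync capabilities and,
      * for DP links, the CEC EDID, handling both the connected and the
      * disconnected case.
      */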
2061 void amdgpu_dm_update_connector_after_detect(
2062                 struct amdgpu_dm_connector *aconnector)
2063 {
2064         struct drm_connector *connector = &aconnector->base;
2065         struct drm_device *dev = connector->dev;
2066         struct dc_sink *sink;
2067
2068         /* MST handled by drm_mst framework */
2069         if (aconnector->mst_mgr.mst_state)
2070                 return;
2071
2072
2073         sink = aconnector->dc_link->local_sink;
2074         if (sink)
2075                 dc_sink_retain(sink);
2076
2077         /*
2078          * An EDID-managed (forced) connector gets its first update only in the
2079          * mode_valid hook; its sink is then set to either the emulated or the
2080          * physical sink, depending on link status. Skip if already done at boot.
2081          */
2082         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2083                         && aconnector->dc_em_sink) {
2084
2085                 /*
2086                  * For headless S3 resume, use the emulated sink to fake a
2087                  * stream, because connector->sink is set to NULL on resume.
2088                  */
2089                 mutex_lock(&dev->mode_config.mutex);
2090
2091                 if (sink) {
2092                         if (aconnector->dc_sink) {
2093                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2094                                 /*
2095                                  * The retain/release below bump the sink's refcount:
2096                                  * the link no longer points to it after disconnect,
2097                                  * so the next CRTC-to-connector reshuffle by the UMD
2098                                  * would otherwise trigger an unwanted dc_sink release.
2099                                  */
2100                                 dc_sink_release(aconnector->dc_sink);
2101                         }
2102                         aconnector->dc_sink = sink;
2103                         dc_sink_retain(aconnector->dc_sink);
2104                         amdgpu_dm_update_freesync_caps(connector,
2105                                         aconnector->edid);
2106                 } else {
2107                         amdgpu_dm_update_freesync_caps(connector, NULL);
2108                         if (!aconnector->dc_sink) {
2109                                 aconnector->dc_sink = aconnector->dc_em_sink;
2110                                 dc_sink_retain(aconnector->dc_sink);
2111                         }
2112                 }
2113
2114                 mutex_unlock(&dev->mode_config.mutex);
2115
2116                 if (sink)
2117                         dc_sink_release(sink);
2118                 return;
2119         }
2120
2121         /*
2122          * TODO: temporary guard until a proper fix is found.
2123          * If this sink is an MST sink, we should not do anything.
2124          */
2125         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2126                 dc_sink_release(sink);
2127                 return;
2128         }
2129
2130         if (aconnector->dc_sink == sink) {
2131                 /*
2132                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2133                  * Do nothing!!
2134                  */
2135                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2136                                 aconnector->connector_id);
2137                 if (sink)
2138                         dc_sink_release(sink);
2139                 return;
2140         }
2141
2142         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2143                 aconnector->connector_id, aconnector->dc_sink, sink);
2144
2145         mutex_lock(&dev->mode_config.mutex);
2146
2147         /*
2148          * 1. Update status of the drm connector
2149          * 2. Send an event and let userspace tell us what to do
2150          */
2151         if (sink) {
2152                 /*
2153                  * TODO: check if we still need the S3 mode update workaround.
2154                  * If yes, put it here.
2155                  */
2156                 if (aconnector->dc_sink)
2157                         amdgpu_dm_update_freesync_caps(connector, NULL);
2158
2159                 aconnector->dc_sink = sink;
2160                 dc_sink_retain(aconnector->dc_sink);
2161                 if (sink->dc_edid.length == 0) {
2162                         aconnector->edid = NULL;
2163                         if (aconnector->dc_link->aux_mode) {
2164                                 drm_dp_cec_unset_edid(
2165                                         &aconnector->dm_dp_aux.aux);
2166                         }
2167                 } else {
2168                         aconnector->edid =
2169                                 (struct edid *)sink->dc_edid.raw_edid;
2170
2171                         drm_connector_update_edid_property(connector,
2172                                                            aconnector->edid);
2173
2174                         if (aconnector->dc_link->aux_mode)
2175                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2176                                                     aconnector->edid);
2177                 }
2178
2179                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2180                 update_connector_ext_caps(aconnector);
2181         } else {
2182                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2183                 amdgpu_dm_update_freesync_caps(connector, NULL);
2184                 drm_connector_update_edid_property(connector, NULL);
2185                 aconnector->num_modes = 0;
2186                 dc_sink_release(aconnector->dc_sink);
2187                 aconnector->dc_sink = NULL;
2188                 aconnector->edid = NULL;
2189 #ifdef CONFIG_DRM_AMD_DC_HDCP
2190                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2191                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2192                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2193 #endif
2194         }
2195
2196         mutex_unlock(&dev->mode_config.mutex);
2197
2198         if (sink)
2199                 dc_sink_release(sink);
2200 }
2201
2202 static void handle_hpd_irq(void *param)
2203 {
2204         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2205         struct drm_connector *connector = &aconnector->base;
2206         struct drm_device *dev = connector->dev;
2207         enum dc_connection_type new_connection_type = dc_connection_none;
2208 #ifdef CONFIG_DRM_AMD_DC_HDCP
2209         struct amdgpu_device *adev = dev->dev_private;
2210 #endif
2211
2212         /*
2213          * On failure, or for MST, there is no need to update the connector
2214          * status or notify the OS, since MST handles this in its own context.
2215          */
2216         mutex_lock(&aconnector->hpd_lock);
2217
2218 #ifdef CONFIG_DRM_AMD_DC_HDCP
2219         if (adev->dm.hdcp_workqueue)
2220                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2221 #endif
2222         if (aconnector->fake_enable)
2223                 aconnector->fake_enable = false;
2224
2225         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2226                 DRM_ERROR("KMS: Failed to detect connector\n");
2227
2228         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2229                 emulated_link_detect(aconnector->dc_link);
2230
2231
2232                 drm_modeset_lock_all(dev);
2233                 dm_restore_drm_connector_state(dev, connector);
2234                 drm_modeset_unlock_all(dev);
2235
2236                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2237                         drm_kms_helper_hotplug_event(dev);
2238
2239         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2240                 amdgpu_dm_update_connector_after_detect(aconnector);
2241
2242
2243                 drm_modeset_lock_all(dev);
2244                 dm_restore_drm_connector_state(dev, connector);
2245                 drm_modeset_unlock_all(dev);
2246
2247                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2248                         drm_kms_helper_hotplug_event(dev);
2249         }
2250         mutex_unlock(&aconnector->hpd_lock);
2251
2252 }
2253
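     /*
      * Service DP short-pulse interrupts for an MST root connector: read the
      * sink's ESI (or legacy DPCD 0x200) registers, let the MST manager handle
      * the IRQ, ACK it back at the sink, and repeat until no new IRQ is
      * pending (bounded by max_process_count).
      */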
2254 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2255 {
2256         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2257         uint8_t dret;
2258         bool new_irq_handled = false;
2259         int dpcd_addr;
2260         int dpcd_bytes_to_read;
2261
2262         const int max_process_count = 30;
2263         int process_count = 0;
2264
2265         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2266
2267         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2268                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2269                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2270                 dpcd_addr = DP_SINK_COUNT;
2271         } else {
2272                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2273                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2274                 dpcd_addr = DP_SINK_COUNT_ESI;
2275         }
2276
2277         dret = drm_dp_dpcd_read(
2278                 &aconnector->dm_dp_aux.aux,
2279                 dpcd_addr,
2280                 esi,
2281                 dpcd_bytes_to_read);
2282
2283         while (dret == dpcd_bytes_to_read &&
2284                 process_count < max_process_count) {
2285                 uint8_t retry;
2286                 dret = 0;
2287
2288                 process_count++;
2289
2290                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2291                 /* handle HPD short pulse irq */
2292                 if (aconnector->mst_mgr.mst_state)
2293                         drm_dp_mst_hpd_irq(
2294                                 &aconnector->mst_mgr,
2295                                 esi,
2296                                 &new_irq_handled);
2297
2298                 if (new_irq_handled) {
2299                         /* ACK at DPCD to notify downstream */
2300                         const int ack_dpcd_bytes_to_write =
2301                                 dpcd_bytes_to_read - 1;
2302
2303                         for (retry = 0; retry < 3; retry++) {
2304                                 uint8_t wret;
2305
2306                                 wret = drm_dp_dpcd_write(
2307                                         &aconnector->dm_dp_aux.aux,
2308                                         dpcd_addr + 1,
2309                                         &esi[1],
2310                                         ack_dpcd_bytes_to_write);
2311                                 if (wret == ack_dpcd_bytes_to_write)
2312                                         break;
2313                         }
2314
2315                         /* check if there is new irq to be handled */
2316                         dret = drm_dp_dpcd_read(
2317                                 &aconnector->dm_dp_aux.aux,
2318                                 dpcd_addr,
2319                                 esi,
2320                                 dpcd_bytes_to_read);
2321
2322                         new_irq_handled = false;
2323                 } else {
2324                         break;
2325                 }
2326         }
2327
2328         if (process_count == max_process_count)
2329                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2330 }
2331
2332 static void handle_hpd_rx_irq(void *param)
2333 {
2334         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2335         struct drm_connector *connector = &aconnector->base;
2336         struct drm_device *dev = connector->dev;
2337         struct dc_link *dc_link = aconnector->dc_link;
2338         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2339         enum dc_connection_type new_connection_type = dc_connection_none;
2340 #ifdef CONFIG_DRM_AMD_DC_HDCP
2341         union hpd_irq_data hpd_irq_data;
2342         struct amdgpu_device *adev = dev->dev_private;
2343
2344         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2345 #endif
2346
2347         /*
2348          * TODO: Temporarily take a mutex so the HPD interrupt does not race
2349          * with GPIO access. Once an i2c helper is implemented, this mutex
2350          * should be retired.
2351          */
2352         if (dc_link->type != dc_connection_mst_branch)
2353                 mutex_lock(&aconnector->hpd_lock);
2354
2355
2356 #ifdef CONFIG_DRM_AMD_DC_HDCP
2357         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2358 #else
2359         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2360 #endif
2361                         !is_mst_root_connector) {
2362                 /* Downstream Port status changed. */
2363                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2364                         DRM_ERROR("KMS: Failed to detect connector\n");
2365
2366                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2367                         emulated_link_detect(dc_link);
2368
2369                         if (aconnector->fake_enable)
2370                                 aconnector->fake_enable = false;
2371
2372                         amdgpu_dm_update_connector_after_detect(aconnector);
2373
2374
2375                         drm_modeset_lock_all(dev);
2376                         dm_restore_drm_connector_state(dev, connector);
2377                         drm_modeset_unlock_all(dev);
2378
2379                         drm_kms_helper_hotplug_event(dev);
2380                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2381
2382                         if (aconnector->fake_enable)
2383                                 aconnector->fake_enable = false;
2384
2385                         amdgpu_dm_update_connector_after_detect(aconnector);
2386
2387
2388                         drm_modeset_lock_all(dev);
2389                         dm_restore_drm_connector_state(dev, connector);
2390                         drm_modeset_unlock_all(dev);
2391
2392                         drm_kms_helper_hotplug_event(dev);
2393                 }
2394         }
2395 #ifdef CONFIG_DRM_AMD_DC_HDCP
2396         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2397                 if (adev->dm.hdcp_workqueue)
2398                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2399         }
2400 #endif
2401         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2402             (dc_link->type == dc_connection_mst_branch))
2403                 dm_handle_hpd_rx_irq(aconnector);
2404
2405         if (dc_link->type != dc_connection_mst_branch) {
2406                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2407                 mutex_unlock(&aconnector->hpd_lock);
2408         }
2409 }
2410
2411 static void register_hpd_handlers(struct amdgpu_device *adev)
2412 {
2413         struct drm_device *dev = adev->ddev;
2414         struct drm_connector *connector;
2415         struct amdgpu_dm_connector *aconnector;
2416         const struct dc_link *dc_link;
2417         struct dc_interrupt_params int_params = {0};
2418
2419         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2420         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2421
2422         list_for_each_entry(connector,
2423                         &dev->mode_config.connector_list, head) {
2424
2425                 aconnector = to_amdgpu_dm_connector(connector);
2426                 dc_link = aconnector->dc_link;
2427
2428                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2429                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2430                         int_params.irq_source = dc_link->irq_source_hpd;
2431
2432                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2433                                         handle_hpd_irq,
2434                                         (void *) aconnector);
2435                 }
2436
2437                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2438
2439                         /* Also register for DP short pulse (hpd_rx). */
2440                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2441                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2442
2443                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444                                         handle_hpd_rx_irq,
2445                                         (void *) aconnector);
2446                 }
2447         }
2448 }
2449
2450 /* Register IRQ sources and initialize IRQ callbacks */
2451 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2452 {
2453         struct dc *dc = adev->dm.dc;
2454         struct common_irq_params *c_irq_params;
2455         struct dc_interrupt_params int_params = {0};
2456         int r;
2457         int i;
2458         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2459
2460         if (adev->asic_type >= CHIP_VEGA10)
2461                 client_id = SOC15_IH_CLIENTID_DCE;
2462
2463         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2464         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2465
2466         /*
2467          * Actions of amdgpu_irq_add_id():
2468          * 1. Register a set() function with base driver.
2469          *    Base driver will call set() function to enable/disable an
2470          *    interrupt in DC hardware.
2471          * 2. Register amdgpu_dm_irq_handler().
2472          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2473          *    coming from DC hardware.
2474          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2475          *    for acknowledging and handling.
2476          */
2477         /* Use VBLANK interrupt */
2478         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2479                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2480                 if (r) {
2481                         DRM_ERROR("Failed to add crtc irq id!\n");
2482                         return r;
2483                 }
2484
2485                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2486                 int_params.irq_source =
2487                         dc_interrupt_to_irq_source(dc, i, 0);
2488
2489                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2490
2491                 c_irq_params->adev = adev;
2492                 c_irq_params->irq_src = int_params.irq_source;
2493
2494                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2495                                 dm_crtc_high_irq, c_irq_params);
2496         }
2497
2498         /* Use VUPDATE interrupt */
2499         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2500                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2501                 if (r) {
2502                         DRM_ERROR("Failed to add vupdate irq id!\n");
2503                         return r;
2504                 }
2505
2506                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2507                 int_params.irq_source =
2508                         dc_interrupt_to_irq_source(dc, i, 0);
2509
2510                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2511
2512                 c_irq_params->adev = adev;
2513                 c_irq_params->irq_src = int_params.irq_source;
2514
2515                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2516                                 dm_vupdate_high_irq, c_irq_params);
2517         }
2518
2519         /* Use GRPH_PFLIP interrupt */
2520         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2521                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2522                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2523                 if (r) {
2524                         DRM_ERROR("Failed to add page flip irq id!\n");
2525                         return r;
2526                 }
2527
2528                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2529                 int_params.irq_source =
2530                         dc_interrupt_to_irq_source(dc, i, 0);
2531
2532                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2533
2534                 c_irq_params->adev = adev;
2535                 c_irq_params->irq_src = int_params.irq_source;
2536
2537                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2538                                 dm_pflip_high_irq, c_irq_params);
2539
2540         }
2541
2542         /* HPD */
2543         r = amdgpu_irq_add_id(adev, client_id,
2544                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2545         if (r) {
2546                 DRM_ERROR("Failed to add hpd irq id!\n");
2547                 return r;
2548         }
2549
2550         register_hpd_handlers(adev);
2551
2552         return 0;
2553 }
2554
2555 #if defined(CONFIG_DRM_AMD_DC_DCN)
2556 /* Register IRQ sources and initialize IRQ callbacks */
2557 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2558 {
2559         struct dc *dc = adev->dm.dc;
2560         struct common_irq_params *c_irq_params;
2561         struct dc_interrupt_params int_params = {0};
2562         int r;
2563         int i;
2564
2565         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2566         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2567
2568         /*
2569          * Actions of amdgpu_irq_add_id():
2570          * 1. Register a set() function with base driver.
2571          *    Base driver will call set() function to enable/disable an
2572          *    interrupt in DC hardware.
2573          * 2. Register amdgpu_dm_irq_handler().
2574          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2575          *    coming from DC hardware.
2576          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2577          *    for acknowledging and handling.
2578          */
2579
2580         /* Use VSTARTUP interrupt */
2581         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2582                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2583                         i++) {
2584                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2585
2586                 if (r) {
2587                         DRM_ERROR("Failed to add crtc irq id!\n");
2588                         return r;
2589                 }
2590
2591                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592                 int_params.irq_source =
2593                         dc_interrupt_to_irq_source(dc, i, 0);
2594
2595                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2596
2597                 c_irq_params->adev = adev;
2598                 c_irq_params->irq_src = int_params.irq_source;
2599
2600                 amdgpu_dm_irq_register_interrupt(
2601                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2602         }
2603
2604         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2605          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2606          * to trigger at end of each vblank, regardless of state of the lock,
2607          * matching DCE behaviour.
2608          */
2609         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2610              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2611              i++) {
2612                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2613
2614                 if (r) {
2615                         DRM_ERROR("Failed to add vupdate irq id!\n");
2616                         return r;
2617                 }
2618
2619                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2620                 int_params.irq_source =
2621                         dc_interrupt_to_irq_source(dc, i, 0);
2622
2623                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2624
2625                 c_irq_params->adev = adev;
2626                 c_irq_params->irq_src = int_params.irq_source;
2627
2628                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2629                                 dm_vupdate_high_irq, c_irq_params);
2630         }
2631
2632         /* Use GRPH_PFLIP interrupt */
2633         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2634                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2635                         i++) {
2636                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2637                 if (r) {
2638                         DRM_ERROR("Failed to add page flip irq id!\n");
2639                         return r;
2640                 }
2641
2642                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2643                 int_params.irq_source =
2644                         dc_interrupt_to_irq_source(dc, i, 0);
2645
2646                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2647
2648                 c_irq_params->adev = adev;
2649                 c_irq_params->irq_src = int_params.irq_source;
2650
2651                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2652                                 dm_pflip_high_irq, c_irq_params);
2653
2654         }
2655
2656         /* HPD */
2657         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2658                         &adev->hpd_irq);
2659         if (r) {
2660                 DRM_ERROR("Failed to add hpd irq id!\n");
2661                 return r;
2662         }
2663
2664         register_hpd_handlers(adev);
2665
2666         return 0;
2667 }
2668 #endif
2669
2670 /*
2671  * Acquires the lock for the atomic state object and returns
2672  * the new atomic state.
2673  *
2674  * This should only be called during atomic check.
2675  */
2676 static int dm_atomic_get_state(struct drm_atomic_state *state,
2677                                struct dm_atomic_state **dm_state)
2678 {
2679         struct drm_device *dev = state->dev;
2680         struct amdgpu_device *adev = dev->dev_private;
2681         struct amdgpu_display_manager *dm = &adev->dm;
2682         struct drm_private_state *priv_state;
2683
2684         if (*dm_state)
2685                 return 0;
2686
2687         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2688         if (IS_ERR(priv_state))
2689                 return PTR_ERR(priv_state);
2690
2691         *dm_state = to_dm_atomic_state(priv_state);
2692
2693         return 0;
2694 }
2695
2696 struct dm_atomic_state *
2697 dm_atomic_get_new_state(struct drm_atomic_state *state)
2698 {
2699         struct drm_device *dev = state->dev;
2700         struct amdgpu_device *adev = dev->dev_private;
2701         struct amdgpu_display_manager *dm = &adev->dm;
2702         struct drm_private_obj *obj;
2703         struct drm_private_state *new_obj_state;
2704         int i;
2705
2706         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2707                 if (obj->funcs == dm->atomic_obj.funcs)
2708                         return to_dm_atomic_state(new_obj_state);
2709         }
2710
2711         return NULL;
2712 }
2713
2714 struct dm_atomic_state *
2715 dm_atomic_get_old_state(struct drm_atomic_state *state)
2716 {
2717         struct drm_device *dev = state->dev;
2718         struct amdgpu_device *adev = dev->dev_private;
2719         struct amdgpu_display_manager *dm = &adev->dm;
2720         struct drm_private_obj *obj;
2721         struct drm_private_state *old_obj_state;
2722         int i;
2723
2724         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2725                 if (obj->funcs == dm->atomic_obj.funcs)
2726                         return to_dm_atomic_state(old_obj_state);
2727         }
2728
2729         return NULL;
2730 }
2731
2732 static struct drm_private_state *
2733 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2734 {
2735         struct dm_atomic_state *old_state, *new_state;
2736
2737         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2738         if (!new_state)
2739                 return NULL;
2740
2741         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2742
2743         old_state = to_dm_atomic_state(obj->state);
2744
2745         if (old_state && old_state->context)
2746                 new_state->context = dc_copy_state(old_state->context);
2747
2748         if (!new_state->context) {
2749                 kfree(new_state);
2750                 return NULL;
2751         }
2752
2753         return &new_state->base;
2754 }
2755
2756 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2757                                     struct drm_private_state *state)
2758 {
2759         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2760
2761         if (dm_state && dm_state->context)
2762                 dc_release_state(dm_state->context);
2763
2764         kfree(dm_state);
2765 }
2766
2767 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2768         .atomic_duplicate_state = dm_atomic_duplicate_state,
2769         .atomic_destroy_state = dm_atomic_destroy_state,
2770 };
2771
2772 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2773 {
2774         struct dm_atomic_state *state;
2775         int r;
2776
2777         adev->mode_info.mode_config_initialized = true;
2778
2779         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2780         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2781
2782         adev->ddev->mode_config.max_width = 16384;
2783         adev->ddev->mode_config.max_height = 16384;
2784
2785         adev->ddev->mode_config.preferred_depth = 24;
2786         adev->ddev->mode_config.prefer_shadow = 1;
2787         /* indicates support for immediate flip */
2788         adev->ddev->mode_config.async_page_flip = true;
2789
2790         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2791
2792         state = kzalloc(sizeof(*state), GFP_KERNEL);
2793         if (!state)
2794                 return -ENOMEM;
2795
2796         state->context = dc_create_state(adev->dm.dc);
2797         if (!state->context) {
2798                 kfree(state);
2799                 return -ENOMEM;
2800         }
2801
2802         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2803
2804         drm_atomic_private_obj_init(adev->ddev,
2805                                     &adev->dm.atomic_obj,
2806                                     &state->base,
2807                                     &dm_atomic_state_funcs);
2808
2809         r = amdgpu_display_modeset_create_props(adev);
2810         if (r)
2811                 return r;
2812
2813         r = amdgpu_dm_audio_init(adev);
2814         if (r)
2815                 return r;
2816
2817         return 0;
2818 }
2819
2820 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2821 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2822 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2823
2824 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2825         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2826
2827 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2828 {
2829 #if defined(CONFIG_ACPI)
2830         struct amdgpu_dm_backlight_caps caps;
2831
2832         if (dm->backlight_caps.caps_valid)
2833                 return;
2834
2835         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2836         if (caps.caps_valid) {
2837                 dm->backlight_caps.caps_valid = true;
2838                 if (caps.aux_support)
2839                         return;
2840                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2841                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2842         } else {
2843                 dm->backlight_caps.min_input_signal =
2844                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2845                 dm->backlight_caps.max_input_signal =
2846                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2847         }
2848 #else
2849         if (dm->backlight_caps.aux_support)
2850                 return;
2851
2852         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2853         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2854 #endif
2855 }
2856
2857 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2858 {
2859         bool rc;
2860
2861         if (!link)
2862                 return 1;
2863
2864         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2865                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2866
2867         return rc ? 0 : 1;
2868 }
2869
2870 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2871                               const uint32_t user_brightness)
2872 {
2873         u32 min, max, conversion_pace;
2874         u32 brightness = user_brightness;
2875
2876         if (!caps)
2877                 goto out;
2878
2879         if (!caps->aux_support) {
2880                 max = caps->max_input_signal;
2881                 min = caps->min_input_signal;
2882                 /*
2883                  * The brightness input is in the range 0-255.
2884                  * It needs to be rescaled to fit between the
2885                  * requested min and max input signal, and it
2886                  * also needs to be scaled up by 0x101 to
2887                  * match the DC interface, which has a range of
2888                  * 0 to 0xffff.
2889                  */
2890                 conversion_pace = 0x101;
2891                 brightness =
2892                         user_brightness
2893                         * conversion_pace
2894                         * (max - min)
2895                         / AMDGPU_MAX_BL_LEVEL
2896                         + min * conversion_pace;
2897         } else {
2898                 /* TODO
2899                  * We are doing a linear interpolation here, which is OK but
2900                  * does not provide the optimal result. We probably want
2901                  * something close to the Perceptual Quantizer (PQ) curve.
2902                  */
2903                 max = caps->aux_max_input_signal;
2904                 min = caps->aux_min_input_signal;
2905
2906                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2907                                + user_brightness * max;
2908                 // Multiply the value by 1000 since we use millinits
2909                 brightness *= 1000;
2910                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2911         }
2912
2913 out:
2914         return brightness;
2915 }
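
/*
 * Editor's sketch (hypothetical helper, not part of the driver): the
 * non-AUX conversion above written out with the default caps
 * min = 12 and max = 255 to show the arithmetic.
 */
static u32 example_convert_brightness_pwm(u32 user_brightness)
{
        const u32 min = 12, max = 255, pace = 0x101;

        /*
         * user_brightness = 255: 255 * 0x101 * 243 / 255 + 12 * 0x101
         *                      = 0x101 * (243 + 12) = 0xffff (DC max).
         * user_brightness = 0:   12 * 0x101 = 3084.
         * The AUX path instead interpolates linearly from min to max
         * and scales by 1000, since that interface takes millinits.
         */
        return user_brightness * pace * (max - min) / AMDGPU_MAX_BL_LEVEL
               + min * pace;
}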
2916
2917 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2918 {
2919         struct amdgpu_display_manager *dm = bl_get_data(bd);
2920         struct amdgpu_dm_backlight_caps caps;
2921         struct dc_link *link = NULL;
2922         u32 brightness;
2923         bool rc;
2924
2925         amdgpu_dm_update_backlight_caps(dm);
2926         caps = dm->backlight_caps;
2927
2928         link = (struct dc_link *)dm->backlight_link;
2929
2930         brightness = convert_brightness(&caps, bd->props.brightness);
2931         // If the panel supports it, set the brightness over the AUX channel
2932         if (caps.aux_support)
2933                 return set_backlight_via_aux(link, brightness);
2934
2935         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2936
2937         return rc ? 0 : 1;
2938 }
2939
2940 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2941 {
2942         struct amdgpu_display_manager *dm = bl_get_data(bd);
2943         int ret = dc_link_get_backlight_level(dm->backlight_link);
2944
2945         if (ret == DC_ERROR_UNEXPECTED)
2946                 return bd->props.brightness;
2947         return ret;
2948 }
2949
2950 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2951         .options = BL_CORE_SUSPENDRESUME,
2952         .get_brightness = amdgpu_dm_backlight_get_brightness,
2953         .update_status  = amdgpu_dm_backlight_update_status,
2954 };
2955
2956 static void
2957 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2958 {
2959         char bl_name[16];
2960         struct backlight_properties props = { 0 };
2961
2962         amdgpu_dm_update_backlight_caps(dm);
2963
2964         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2965         props.brightness = AMDGPU_MAX_BL_LEVEL;
2966         props.type = BACKLIGHT_RAW;
2967
2968         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2969                         dm->adev->ddev->primary->index);
2970
2971         dm->backlight_dev = backlight_device_register(bl_name,
2972                         dm->adev->ddev->dev,
2973                         dm,
2974                         &amdgpu_dm_backlight_ops,
2975                         &props);
2976
2977         if (IS_ERR(dm->backlight_dev))
2978                 DRM_ERROR("DM: Backlight registration failed!\n");
2979         else
2980                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2981 }
2982
2983 #endif
2984
2985 static int initialize_plane(struct amdgpu_display_manager *dm,
2986                             struct amdgpu_mode_info *mode_info, int plane_id,
2987                             enum drm_plane_type plane_type,
2988                             const struct dc_plane_cap *plane_cap)
2989 {
2990         struct drm_plane *plane;
2991         unsigned long possible_crtcs;
2992         int ret = 0;
2993
2994         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2995         if (!plane) {
2996                 DRM_ERROR("KMS: Failed to allocate plane\n");
2997                 return -ENOMEM;
2998         }
2999         plane->type = plane_type;
3000
3001         /*
3002          * HACK: IGT tests expect that the primary plane for a CRTC
3003          * can only have one possible CRTC. Only expose support for
3004          * any CRTC on planes that will not be used as a primary plane
3005          * for a CRTC - i.e. overlay or underlay planes.
3006          */
3007         possible_crtcs = 1 << plane_id;
3008         if (plane_id >= dm->dc->caps.max_streams)
3009                 possible_crtcs = 0xff;
3010
3011         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3012
3013         if (ret) {
3014                 DRM_ERROR("KMS: Failed to initialize plane\n");
3015                 kfree(plane);
3016                 return ret;
3017         }
3018
3019         if (mode_info)
3020                 mode_info->planes[plane_id] = plane;
3021
3022         return ret;
3023 }
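
/*
 * Editor's note (illustrative): possible_crtcs is a per-CRTC bitmask.
 * With max_streams = 4, primary plane 2 gets 1 << 2 = 0x4 and can only
 * be bound to CRTC 2, while an overlay plane with plane_id >= 4 gets
 * 0xff and may be assigned to any CRTC.
 */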
3024
3025
3026 static void register_backlight_device(struct amdgpu_display_manager *dm,
3027                                       struct dc_link *link)
3028 {
3029 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3030         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3031
3032         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3033             link->type != dc_connection_none) {
3034                 /*
3035                  * Even if registration fails, we should continue with
3036                  * DM initialization, because not having backlight control
3037                  * is better than a black screen.
3038                  */
3039                 amdgpu_dm_register_backlight_device(dm);
3040
3041                 if (dm->backlight_dev)
3042                         dm->backlight_link = link;
3043         }
3044 #endif
3045 }
3046
3047
3048 /*
3049  * In this architecture, the association
3050  * connector -> encoder -> crtc
3051  * is not really required. The crtc and connector will hold the
3052  * display_index as an abstraction to use with the DAL component.
3053  *
3054  * Returns 0 on success
3055  */
3056 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3057 {
3058         struct amdgpu_display_manager *dm = &adev->dm;
3059         int32_t i;
3060         struct amdgpu_dm_connector *aconnector = NULL;
3061         struct amdgpu_encoder *aencoder = NULL;
3062         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3063         uint32_t link_cnt;
3064         int32_t primary_planes;
3065         enum dc_connection_type new_connection_type = dc_connection_none;
3066         const struct dc_plane_cap *plane;
3067
3068         link_cnt = dm->dc->caps.max_links;
3069         if (amdgpu_dm_mode_config_init(dm->adev)) {
3070                 DRM_ERROR("DM: Failed to initialize mode config\n");
3071                 return -EINVAL;
3072         }
3073
3074         /* There is one primary plane per CRTC */
3075         primary_planes = dm->dc->caps.max_streams;
3076         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3077
3078         /*
3079          * Initialize primary planes, implicit planes for legacy IOCTLS.
3080          * Order is reversed to match iteration order in atomic check.
3081          */
3082         for (i = (primary_planes - 1); i >= 0; i--) {
3083                 plane = &dm->dc->caps.planes[i];
3084
3085                 if (initialize_plane(dm, mode_info, i,
3086                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3087                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3088                         goto fail;
3089                 }
3090         }
3091
3092         /*
3093          * Initialize overlay planes, index starting after primary planes.
3094          * These planes have a higher DRM index than the primary planes since
3095          * they should be considered as having a higher z-order.
3096          * Order is reversed to match iteration order in atomic check.
3097          *
3098          * Only support DCN for now, and only expose one so we don't encourage
3099          * userspace to use up all the pipes.
3100          */
3101         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3102                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3103
3104                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3105                         continue;
3106
3107                 if (!plane->blends_with_above || !plane->blends_with_below)
3108                         continue;
3109
3110                 if (!plane->pixel_format_support.argb8888)
3111                         continue;
3112
3113                 if (initialize_plane(dm, NULL, primary_planes + i,
3114                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3115                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3116                         goto fail;
3117                 }
3118
3119                 /* Only create one overlay plane. */
3120                 break;
3121         }
3122
3123         for (i = 0; i < dm->dc->caps.max_streams; i++)
3124                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3125                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3126                         goto fail;
3127                 }
3128
3129         dm->display_indexes_num = dm->dc->caps.max_streams;
3130
3131         /* Loop over all connectors on the board */
3132         for (i = 0; i < link_cnt; i++) {
3133                 struct dc_link *link = NULL;
3134
3135                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3136                         DRM_ERROR(
3137                                 "KMS: Cannot support more than %d display indexes\n",
3138                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3139                         continue;
3140                 }
3141
3142                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3143                 if (!aconnector)
3144                         goto fail;
3145
3146                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3147                 if (!aencoder)
3148                         goto fail;
3149
3150                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3151                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3152                         goto fail;
3153                 }
3154
3155                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3156                         DRM_ERROR("KMS: Failed to initialize connector\n");
3157                         goto fail;
3158                 }
3159
3160                 link = dc_get_link_at_index(dm->dc, i);
3161
3162                 if (!dc_link_detect_sink(link, &new_connection_type))
3163                         DRM_ERROR("KMS: Failed to detect connector\n");
3164
3165                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3166                         emulated_link_detect(link);
3167                         amdgpu_dm_update_connector_after_detect(aconnector);
3168
3169                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3170                         amdgpu_dm_update_connector_after_detect(aconnector);
3171                         register_backlight_device(dm, link);
3172                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3173                                 amdgpu_dm_set_psr_caps(link);
3174                 }
3175
3176
3177         }
3178
3179         /* Software is initialized. Now we can register interrupt handlers. */
3180         switch (adev->asic_type) {
3181         case CHIP_BONAIRE:
3182         case CHIP_HAWAII:
3183         case CHIP_KAVERI:
3184         case CHIP_KABINI:
3185         case CHIP_MULLINS:
3186         case CHIP_TONGA:
3187         case CHIP_FIJI:
3188         case CHIP_CARRIZO:
3189         case CHIP_STONEY:
3190         case CHIP_POLARIS11:
3191         case CHIP_POLARIS10:
3192         case CHIP_POLARIS12:
3193         case CHIP_VEGAM:
3194         case CHIP_VEGA10:
3195         case CHIP_VEGA12:
3196         case CHIP_VEGA20:
3197                 if (dce110_register_irq_handlers(dm->adev)) {
3198                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3199                         goto fail;
3200                 }
3201                 break;
3202 #if defined(CONFIG_DRM_AMD_DC_DCN)
3203         case CHIP_RAVEN:
3204         case CHIP_NAVI12:
3205         case CHIP_NAVI10:
3206         case CHIP_NAVI14:
3207         case CHIP_RENOIR:
3208                 if (dcn10_register_irq_handlers(dm->adev)) {
3209                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3210                         goto fail;
3211                 }
3212                 break;
3213 #endif
3214         default:
3215                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3216                 goto fail;
3217         }
3218
3219         /* No userspace support. */
3220         dm->dc->debug.disable_tri_buf = true;
3221
3222         return 0;
3223 fail:
3224         kfree(aencoder);
3225         kfree(aconnector);
3226
3227         return -EINVAL;
3228 }
3229
3230 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3231 {
3232         drm_mode_config_cleanup(dm->ddev);
3233         drm_atomic_private_obj_fini(&dm->atomic_obj);
3235 }
3236
3237 /******************************************************************************
3238  * amdgpu_display_funcs functions
3239  *****************************************************************************/
3240
3241 /*
3242  * dm_bandwidth_update - program display watermarks
3243  *
3244  * @adev: amdgpu_device pointer
3245  *
3246  * Calculate and program the display watermarks and line buffer allocation.
3247  */
3248 static void dm_bandwidth_update(struct amdgpu_device *adev)
3249 {
3250         /* TODO: implement later */
3251 }
3252
3253 static const struct amdgpu_display_funcs dm_display_funcs = {
3254         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3255         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3256         .backlight_set_level = NULL, /* never called for DC */
3257         .backlight_get_level = NULL, /* never called for DC */
3258         .hpd_sense = NULL,/* called unconditionally */
3259         .hpd_set_polarity = NULL, /* called unconditionally */
3260         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3261         .page_flip_get_scanoutpos =
3262                 dm_crtc_get_scanoutpos,/* called unconditionally */
3263         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3264         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3265 };
3266
3267 #if defined(CONFIG_DEBUG_KERNEL_DC)
3268
3269 static ssize_t s3_debug_store(struct device *device,
3270                               struct device_attribute *attr,
3271                               const char *buf,
3272                               size_t count)
3273 {
3274         int ret;
3275         int s3_state;
3276         struct drm_device *drm_dev = dev_get_drvdata(device);
3277         struct amdgpu_device *adev = drm_dev->dev_private;
3278
3279         ret = kstrtoint(buf, 0, &s3_state);
3280
3281         if (ret == 0) {
3282                 if (s3_state) {
3283                         dm_resume(adev);
3284                         drm_kms_helper_hotplug_event(adev->ddev);
3285                 } else
3286                         dm_suspend(adev);
3287         }
3288
3289         return ret == 0 ? count : 0;
3290 }
3291
3292 DEVICE_ATTR_WO(s3_debug);
3293
3294 #endif
3295
3296 static int dm_early_init(void *handle)
3297 {
3298         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3299
3300         switch (adev->asic_type) {
3301         case CHIP_BONAIRE:
3302         case CHIP_HAWAII:
3303                 adev->mode_info.num_crtc = 6;
3304                 adev->mode_info.num_hpd = 6;
3305                 adev->mode_info.num_dig = 6;
3306                 break;
3307         case CHIP_KAVERI:
3308                 adev->mode_info.num_crtc = 4;
3309                 adev->mode_info.num_hpd = 6;
3310                 adev->mode_info.num_dig = 7;
3311                 break;
3312         case CHIP_KABINI:
3313         case CHIP_MULLINS:
3314                 adev->mode_info.num_crtc = 2;
3315                 adev->mode_info.num_hpd = 6;
3316                 adev->mode_info.num_dig = 6;
3317                 break;
3318         case CHIP_FIJI:
3319         case CHIP_TONGA:
3320                 adev->mode_info.num_crtc = 6;
3321                 adev->mode_info.num_hpd = 6;
3322                 adev->mode_info.num_dig = 7;
3323                 break;
3324         case CHIP_CARRIZO:
3325                 adev->mode_info.num_crtc = 3;
3326                 adev->mode_info.num_hpd = 6;
3327                 adev->mode_info.num_dig = 9;
3328                 break;
3329         case CHIP_STONEY:
3330                 adev->mode_info.num_crtc = 2;
3331                 adev->mode_info.num_hpd = 6;
3332                 adev->mode_info.num_dig = 9;
3333                 break;
3334         case CHIP_POLARIS11:
3335         case CHIP_POLARIS12:
3336                 adev->mode_info.num_crtc = 5;
3337                 adev->mode_info.num_hpd = 5;
3338                 adev->mode_info.num_dig = 5;
3339                 break;
3340         case CHIP_POLARIS10:
3341         case CHIP_VEGAM:
3342                 adev->mode_info.num_crtc = 6;
3343                 adev->mode_info.num_hpd = 6;
3344                 adev->mode_info.num_dig = 6;
3345                 break;
3346         case CHIP_VEGA10:
3347         case CHIP_VEGA12:
3348         case CHIP_VEGA20:
3349                 adev->mode_info.num_crtc = 6;
3350                 adev->mode_info.num_hpd = 6;
3351                 adev->mode_info.num_dig = 6;
3352                 break;
3353 #if defined(CONFIG_DRM_AMD_DC_DCN)
3354         case CHIP_RAVEN:
3355                 adev->mode_info.num_crtc = 4;
3356                 adev->mode_info.num_hpd = 4;
3357                 adev->mode_info.num_dig = 4;
3358                 break;
3359 #endif
3360         case CHIP_NAVI10:
3361         case CHIP_NAVI12:
3362                 adev->mode_info.num_crtc = 6;
3363                 adev->mode_info.num_hpd = 6;
3364                 adev->mode_info.num_dig = 6;
3365                 break;
3366         case CHIP_NAVI14:
3367                 adev->mode_info.num_crtc = 5;
3368                 adev->mode_info.num_hpd = 5;
3369                 adev->mode_info.num_dig = 5;
3370                 break;
3371         case CHIP_RENOIR:
3372                 adev->mode_info.num_crtc = 4;
3373                 adev->mode_info.num_hpd = 4;
3374                 adev->mode_info.num_dig = 4;
3375                 break;
3376         default:
3377                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3378                 return -EINVAL;
3379         }
3380
3381         amdgpu_dm_set_irq_funcs(adev);
3382
3383         if (adev->mode_info.funcs == NULL)
3384                 adev->mode_info.funcs = &dm_display_funcs;
3385
3386         /*
3387          * Note: Do NOT change adev->audio_endpt_rreg and
3388          * adev->audio_endpt_wreg because they are initialised in
3389          * amdgpu_device_init()
3390          */
3391 #if defined(CONFIG_DEBUG_KERNEL_DC)
3392         device_create_file(
3393                 adev->ddev->dev,
3394                 &dev_attr_s3_debug);
3395 #endif
3396
3397         return 0;
3398 }
3399
3400 static bool modeset_required(struct drm_crtc_state *crtc_state,
3401                              struct dc_stream_state *new_stream,
3402                              struct dc_stream_state *old_stream)
3403 {
3404         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3405                 return false;
3406
3407         if (!crtc_state->enable)
3408                 return false;
3409
3410         return crtc_state->active;
3411 }
3412
3413 static bool modereset_required(struct drm_crtc_state *crtc_state)
3414 {
3415         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3416                 return false;
3417
3418         return !crtc_state->enable || !crtc_state->active;
3419 }
3420
3421 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3422 {
3423         drm_encoder_cleanup(encoder);
3424         kfree(encoder);
3425 }
3426
3427 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3428         .destroy = amdgpu_dm_encoder_destroy,
3429 };
3430
3431
3432 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3433                                 struct dc_scaling_info *scaling_info)
3434 {
3435         int scale_w, scale_h;
3436
3437         memset(scaling_info, 0, sizeof(*scaling_info));
3438
3439         /* Source is in 16.16 fixed point, but we ignore the fractional part for now... */
3440         scaling_info->src_rect.x = state->src_x >> 16;
3441         scaling_info->src_rect.y = state->src_y >> 16;
3442
3443         scaling_info->src_rect.width = state->src_w >> 16;
3444         if (scaling_info->src_rect.width == 0)
3445                 return -EINVAL;
3446
3447         scaling_info->src_rect.height = state->src_h >> 16;
3448         if (scaling_info->src_rect.height == 0)
3449                 return -EINVAL;
3450
3451         scaling_info->dst_rect.x = state->crtc_x;
3452         scaling_info->dst_rect.y = state->crtc_y;
3453
3454         if (state->crtc_w == 0)
3455                 return -EINVAL;
3456
3457         scaling_info->dst_rect.width = state->crtc_w;
3458
3459         if (state->crtc_h == 0)
3460                 return -EINVAL;
3461
3462         scaling_info->dst_rect.height = state->crtc_h;
3463
3464         /* DRM doesn't specify clipping on destination output. */
3465         scaling_info->clip_rect = scaling_info->dst_rect;
3466
3467         /* TODO: Validate scaling per-format with DC plane caps */
3468         scale_w = scaling_info->dst_rect.width * 1000 /
3469                   scaling_info->src_rect.width;
3470
3471         if (scale_w < 250 || scale_w > 16000)
3472                 return -EINVAL;
3473
3474         scale_h = scaling_info->dst_rect.height * 1000 /
3475                   scaling_info->src_rect.height;
3476
3477         if (scale_h < 250 || scale_h > 16000)
3478                 return -EINVAL;
3479
3480         /*
3481          * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3482          * assume reasonable defaults based on the format.
3483          */
3484
3485         return 0;
3486 }
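
/*
 * Editor's sketch (hypothetical helper, not part of the driver): the
 * src rectangle above comes from DRM in 16.16 fixed point, so `>> 16`
 * keeps only the integer part, and the 250/16000 bounds encode the DC
 * limits of 0.25x downscale to 16x upscale in units of 1/1000.
 */
static int example_scale_permille(uint32_t src_w_fixed, uint32_t dst_w)
{
        uint32_t src_w = src_w_fixed >> 16; /* drop the fractional bits */

        /* e.g. src_w = 1920, dst_w = 3840 -> 2000, i.e. a 2x upscale */
        return dst_w * 1000 / src_w;
}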
3487
3488 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3489                        uint64_t *tiling_flags, bool *tmz_surface)
3490 {
3491         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3492         int r = amdgpu_bo_reserve(rbo, false);
3493
3494         if (unlikely(r)) {
3495                 /* Don't show error message when returning -ERESTARTSYS */
3496                 if (r != -ERESTARTSYS)
3497                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3498                 return r;
3499         }
3500
3501         if (tiling_flags)
3502                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3503
3504         if (tmz_surface)
3505                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3506
3507         amdgpu_bo_unreserve(rbo);
3508
3509         return r;
3510 }
3511
3512 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3513 {
3514         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3515
3516         return offset ? (address + offset * 256) : 0;
3517 }
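
/*
 * Editor's note (worked example): DCC_OFFSET_256B stores the metadata
 * offset in 256-byte units, so a field value of 0x400 places the DCC
 * metadata at address + 0x400 * 256 = address + 0x40000, while a field
 * value of 0 means the surface carries no DCC metadata at all.
 */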
3518
3519 static int
3520 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3521                           const struct amdgpu_framebuffer *afb,
3522                           const enum surface_pixel_format format,
3523                           const enum dc_rotation_angle rotation,
3524                           const struct plane_size *plane_size,
3525                           const union dc_tiling_info *tiling_info,
3526                           const uint64_t info,
3527                           struct dc_plane_dcc_param *dcc,
3528                           struct dc_plane_address *address,
3529                           bool force_disable_dcc)
3530 {
3531         struct dc *dc = adev->dm.dc;
3532         struct dc_dcc_surface_param input;
3533         struct dc_surface_dcc_cap output;
3534         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3535         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3536         uint64_t dcc_address;
3537
3538         memset(&input, 0, sizeof(input));
3539         memset(&output, 0, sizeof(output));
3540
3541         if (force_disable_dcc)
3542                 return 0;
3543
3544         if (!offset)
3545                 return 0;
3546
3547         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3548                 return 0;
3549
3550         if (!dc->cap_funcs.get_dcc_compression_cap)
3551                 return -EINVAL;
3552
3553         input.format = format;
3554         input.surface_size.width = plane_size->surface_size.width;
3555         input.surface_size.height = plane_size->surface_size.height;
3556         input.swizzle_mode = tiling_info->gfx9.swizzle;
3557
3558         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3559                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3560         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3561                 input.scan = SCAN_DIRECTION_VERTICAL;
3562
3563         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3564                 return -EINVAL;
3565
3566         if (!output.capable)
3567                 return -EINVAL;
3568
3569         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3570                 return -EINVAL;
3571
3572         dcc->enable = 1;
3573         dcc->meta_pitch =
3574                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3575         dcc->independent_64b_blks = i64b;
3576
3577         dcc_address = get_dcc_address(afb->address, info);
3578         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3579         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3580
3581         return 0;
3582 }
3583
3584 static int
3585 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3586                              const struct amdgpu_framebuffer *afb,
3587                              const enum surface_pixel_format format,
3588                              const enum dc_rotation_angle rotation,
3589                              const uint64_t tiling_flags,
3590                              union dc_tiling_info *tiling_info,
3591                              struct plane_size *plane_size,
3592                              struct dc_plane_dcc_param *dcc,
3593                              struct dc_plane_address *address,
3594                              bool tmz_surface,
3595                              bool force_disable_dcc)
3596 {
3597         const struct drm_framebuffer *fb = &afb->base;
3598         int ret;
3599
3600         memset(tiling_info, 0, sizeof(*tiling_info));
3601         memset(plane_size, 0, sizeof(*plane_size));
3602         memset(dcc, 0, sizeof(*dcc));
3603         memset(address, 0, sizeof(*address));
3604
3605         address->tmz_surface = tmz_surface;
3606
3607         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3608                 plane_size->surface_size.x = 0;
3609                 plane_size->surface_size.y = 0;
3610                 plane_size->surface_size.width = fb->width;
3611                 plane_size->surface_size.height = fb->height;
3612                 plane_size->surface_pitch =
3613                         fb->pitches[0] / fb->format->cpp[0];
3614
3615                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3616                 address->grph.addr.low_part = lower_32_bits(afb->address);
3617                 address->grph.addr.high_part = upper_32_bits(afb->address);
3618         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3619                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3620
3621                 plane_size->surface_size.x = 0;
3622                 plane_size->surface_size.y = 0;
3623                 plane_size->surface_size.width = fb->width;
3624                 plane_size->surface_size.height = fb->height;
3625                 plane_size->surface_pitch =
3626                         fb->pitches[0] / fb->format->cpp[0];
3627
3628                 plane_size->chroma_size.x = 0;
3629                 plane_size->chroma_size.y = 0;
3630                 /* TODO: set these based on surface format */
3631                 plane_size->chroma_size.width = fb->width / 2;
3632                 plane_size->chroma_size.height = fb->height / 2;
3633
3634                 plane_size->chroma_pitch =
3635                         fb->pitches[1] / fb->format->cpp[1];
3636
3637                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3638                 address->video_progressive.luma_addr.low_part =
3639                         lower_32_bits(afb->address);
3640                 address->video_progressive.luma_addr.high_part =
3641                         upper_32_bits(afb->address);
3642                 address->video_progressive.chroma_addr.low_part =
3643                         lower_32_bits(chroma_addr);
3644                 address->video_progressive.chroma_addr.high_part =
3645                         upper_32_bits(chroma_addr);
3646         }
3647
3648         /* Fill GFX8 params */
3649         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3650                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3651
3652                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3653                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3654                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3655                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3656                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3657
3658                 /* XXX fix me for VI */
3659                 tiling_info->gfx8.num_banks = num_banks;
3660                 tiling_info->gfx8.array_mode =
3661                                 DC_ARRAY_2D_TILED_THIN1;
3662                 tiling_info->gfx8.tile_split = tile_split;
3663                 tiling_info->gfx8.bank_width = bankw;
3664                 tiling_info->gfx8.bank_height = bankh;
3665                 tiling_info->gfx8.tile_aspect = mtaspect;
3666                 tiling_info->gfx8.tile_mode =
3667                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3668         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3669                         == DC_ARRAY_1D_TILED_THIN1) {
3670                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3671         }
3672
3673         tiling_info->gfx8.pipe_config =
3674                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3675
3676         if (adev->asic_type == CHIP_VEGA10 ||
3677             adev->asic_type == CHIP_VEGA12 ||
3678             adev->asic_type == CHIP_VEGA20 ||
3679             adev->asic_type == CHIP_NAVI10 ||
3680             adev->asic_type == CHIP_NAVI14 ||
3681             adev->asic_type == CHIP_NAVI12 ||
3682             adev->asic_type == CHIP_RENOIR ||
3683             adev->asic_type == CHIP_RAVEN) {
3684                 /* Fill GFX9 params */
3685                 tiling_info->gfx9.num_pipes =
3686                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3687                 tiling_info->gfx9.num_banks =
3688                         adev->gfx.config.gb_addr_config_fields.num_banks;
3689                 tiling_info->gfx9.pipe_interleave =
3690                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3691                 tiling_info->gfx9.num_shader_engines =
3692                         adev->gfx.config.gb_addr_config_fields.num_se;
3693                 tiling_info->gfx9.max_compressed_frags =
3694                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3695                 tiling_info->gfx9.num_rb_per_se =
3696                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3697                 tiling_info->gfx9.swizzle =
3698                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3699                 tiling_info->gfx9.shaderEnable = 1;
3700
3701                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3702                                                 plane_size, tiling_info,
3703                                                 tiling_flags, dcc, address,
3704                                                 force_disable_dcc);
3705                 if (ret)
3706                         return ret;
3707         }
3708
3709         return 0;
3710 }
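
/*
 * Editor's note (worked example, assuming a tightly packed 1920x1080
 * NV12 buffer): fb->pitches[0] = 1920 bytes with cpp[0] = 1 gives a
 * luma surface_pitch of 1920 pixels; fb->offsets[1] = 1920 * 1080
 * locates the interleaved CbCr plane; and fb->pitches[1] = 1920 bytes
 * with cpp[1] = 2 gives a chroma_pitch of 960 sample pairs, matching
 * the half-resolution chroma_size set above.
 */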
3711
3712 static void
3713 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3714                                bool *per_pixel_alpha, bool *global_alpha,
3715                                int *global_alpha_value)
3716 {
3717         *per_pixel_alpha = false;
3718         *global_alpha = false;
3719         *global_alpha_value = 0xff;
3720
3721         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3722                 return;
3723
3724         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3725                 static const uint32_t alpha_formats[] = {
3726                         DRM_FORMAT_ARGB8888,
3727                         DRM_FORMAT_RGBA8888,
3728                         DRM_FORMAT_ABGR8888,
3729                 };
3730                 uint32_t format = plane_state->fb->format->format;
3731                 unsigned int i;
3732
3733                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3734                         if (format == alpha_formats[i]) {
3735                                 *per_pixel_alpha = true;
3736                                 break;
3737                         }
3738                 }
3739         }
3740
3741         if (plane_state->alpha < 0xffff) {
3742                 *global_alpha = true;
3743                 *global_alpha_value = plane_state->alpha >> 8;
3744         }
3745 }
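
/*
 * Editor's note (illustrative): plane_state->alpha is the 16-bit DRM
 * alpha property, so `alpha >> 8` maps it onto the 8-bit global alpha
 * DC expects; e.g. a DRM alpha of 0x8080 becomes a global_alpha_value
 * of 0x80 (~50%), while the fully opaque 0xffff skips the branch and
 * leaves global_alpha disabled.
 */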
3746
3747 static int
3748 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3749                             const enum surface_pixel_format format,
3750                             enum dc_color_space *color_space)
3751 {
3752         bool full_range;
3753
3754         *color_space = COLOR_SPACE_SRGB;
3755
3756         /* DRM color properties only affect non-RGB formats. */
3757         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3758                 return 0;
3759
3760         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3761
3762         switch (plane_state->color_encoding) {
3763         case DRM_COLOR_YCBCR_BT601:
3764                 if (full_range)
3765                         *color_space = COLOR_SPACE_YCBCR601;
3766                 else
3767                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3768                 break;
3769
3770         case DRM_COLOR_YCBCR_BT709:
3771                 if (full_range)
3772                         *color_space = COLOR_SPACE_YCBCR709;
3773                 else
3774                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3775                 break;
3776
3777         case DRM_COLOR_YCBCR_BT2020:
3778                 if (full_range)
3779                         *color_space = COLOR_SPACE_2020_YCBCR;
3780                 else
3781                         return -EINVAL;
3782                 break;
3783
3784         default:
3785                 return -EINVAL;
3786         }
3787
3788         return 0;
3789 }
3790
3791 static int
3792 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3793                             const struct drm_plane_state *plane_state,
3794                             const uint64_t tiling_flags,
3795                             struct dc_plane_info *plane_info,
3796                             struct dc_plane_address *address,
3797                             bool tmz_surface,
3798                             bool force_disable_dcc)
3799 {
3800         const struct drm_framebuffer *fb = plane_state->fb;
3801         const struct amdgpu_framebuffer *afb =
3802                 to_amdgpu_framebuffer(plane_state->fb);
3803         struct drm_format_name_buf format_name;
3804         int ret;
3805
3806         memset(plane_info, 0, sizeof(*plane_info));
3807
3808         switch (fb->format->format) {
3809         case DRM_FORMAT_C8:
3810                 plane_info->format =
3811                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3812                 break;
3813         case DRM_FORMAT_RGB565:
3814                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3815                 break;
3816         case DRM_FORMAT_XRGB8888:
3817         case DRM_FORMAT_ARGB8888:
3818                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3819                 break;
3820         case DRM_FORMAT_XRGB2101010:
3821         case DRM_FORMAT_ARGB2101010:
3822                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3823                 break;
3824         case DRM_FORMAT_XBGR2101010:
3825         case DRM_FORMAT_ABGR2101010:
3826                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3827                 break;
3828         case DRM_FORMAT_XBGR8888:
3829         case DRM_FORMAT_ABGR8888:
3830                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3831                 break;
3832         case DRM_FORMAT_NV21:
3833                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3834                 break;
3835         case DRM_FORMAT_NV12:
3836                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3837                 break;
3838         case DRM_FORMAT_P010:
3839                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3840                 break;
3841         case DRM_FORMAT_XRGB16161616F:
3842         case DRM_FORMAT_ARGB16161616F:
3843                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3844                 break;
3845         case DRM_FORMAT_XBGR16161616F:
3846         case DRM_FORMAT_ABGR16161616F:
3847                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3848                 break;
3849         default:
3850                 DRM_ERROR(
3851                         "Unsupported screen format %s\n",
3852                         drm_get_format_name(fb->format->format, &format_name));
3853                 return -EINVAL;
3854         }
3855
3856         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3857         case DRM_MODE_ROTATE_0:
3858                 plane_info->rotation = ROTATION_ANGLE_0;
3859                 break;
3860         case DRM_MODE_ROTATE_90:
3861                 plane_info->rotation = ROTATION_ANGLE_90;
3862                 break;
3863         case DRM_MODE_ROTATE_180:
3864                 plane_info->rotation = ROTATION_ANGLE_180;
3865                 break;
3866         case DRM_MODE_ROTATE_270:
3867                 plane_info->rotation = ROTATION_ANGLE_270;
3868                 break;
3869         default:
3870                 plane_info->rotation = ROTATION_ANGLE_0;
3871                 break;
3872         }
3873
3874         plane_info->visible = true;
3875         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3876
3877         plane_info->layer_index = 0;
3878
3879         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3880                                           &plane_info->color_space);
3881         if (ret)
3882                 return ret;
3883
3884         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3885                                            plane_info->rotation, tiling_flags,
3886                                            &plane_info->tiling_info,
3887                                            &plane_info->plane_size,
3888                                            &plane_info->dcc, address, tmz_surface,
3889                                            force_disable_dcc);
3890         if (ret)
3891                 return ret;
3892
3893         fill_blending_from_plane_state(
3894                 plane_state, &plane_info->per_pixel_alpha,
3895                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3896
3897         return 0;
3898 }
3899
3900 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3901                                     struct dc_plane_state *dc_plane_state,
3902                                     struct drm_plane_state *plane_state,
3903                                     struct drm_crtc_state *crtc_state)
3904 {
3905         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3906         const struct amdgpu_framebuffer *amdgpu_fb =
3907                 to_amdgpu_framebuffer(plane_state->fb);
3908         struct dc_scaling_info scaling_info;
3909         struct dc_plane_info plane_info;
3910         uint64_t tiling_flags;
3911         int ret;
3912         bool tmz_surface = false;
3913         bool force_disable_dcc = false;
3914
3915         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3916         if (ret)
3917                 return ret;
3918
3919         dc_plane_state->src_rect = scaling_info.src_rect;
3920         dc_plane_state->dst_rect = scaling_info.dst_rect;
3921         dc_plane_state->clip_rect = scaling_info.clip_rect;
3922         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3923
3924         ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3925         if (ret)
3926                 return ret;
3927
3928         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3929         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3930                                           &plane_info,
3931                                           &dc_plane_state->address,
3932                                           tmz_surface,
3933                                           force_disable_dcc);
3934         if (ret)
3935                 return ret;
3936
3937         dc_plane_state->format = plane_info.format;
3938         dc_plane_state->color_space = plane_info.color_space;
3940         dc_plane_state->plane_size = plane_info.plane_size;
3941         dc_plane_state->rotation = plane_info.rotation;
3942         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3943         dc_plane_state->stereo_format = plane_info.stereo_format;
3944         dc_plane_state->tiling_info = plane_info.tiling_info;
3945         dc_plane_state->visible = plane_info.visible;
3946         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3947         dc_plane_state->global_alpha = plane_info.global_alpha;
3948         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3949         dc_plane_state->dcc = plane_info.dcc;
3950         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3951
3952         /*
3953          * Always set input transfer function, since plane state is refreshed
3954          * every time.
3955          */
3956         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3957         if (ret)
3958                 return ret;
3959
3960         return 0;
3961 }
3962
3963 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3964                                            const struct dm_connector_state *dm_state,
3965                                            struct dc_stream_state *stream)
3966 {
3967         enum amdgpu_rmx_type rmx_type;
3968
3969         struct rect src = { 0 }; /* viewport in composition space */
3970         struct rect dst = { 0 }; /* stream addressable area */
3971
3972         /* No mode: nothing to be done */
3973         if (!mode)
3974                 return;
3975
3976         /* Full screen scaling by default */
3977         src.width = mode->hdisplay;
3978         src.height = mode->vdisplay;
3979         dst.width = stream->timing.h_addressable;
3980         dst.height = stream->timing.v_addressable;
3981
3982         if (dm_state) {
3983                 rmx_type = dm_state->scaling;
3984                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3985                         if (src.width * dst.height <
3986                                         src.height * dst.width) {
3987                                 /* height needs less upscaling/more downscaling */
3988                                 dst.width = src.width *
3989                                                 dst.height / src.height;
3990                         } else {
3991                                 /* width needs less upscaling/more downscaling */
3992                                 dst.height = src.height *
3993                                                 dst.width / src.width;
3994                         }
3995                 } else if (rmx_type == RMX_CENTER) {
3996                         dst = src;
3997                 }
3998
3999                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4000                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4001
4002                 if (dm_state->underscan_enable) {
4003                         dst.x += dm_state->underscan_hborder / 2;
4004                         dst.y += dm_state->underscan_vborder / 2;
4005                         dst.width -= dm_state->underscan_hborder;
4006                         dst.height -= dm_state->underscan_vborder;
4007                 }
4008         }
4009
4010         stream->src = src;
4011         stream->dst = dst;
4012
4013         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4014                         dst.x, dst.y, dst.width, dst.height);
4015
4016 }
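
/*
 * Editor's sketch (hypothetical numbers): with RMX_ASPECT, a 1280x1024
 * (5:4) source on a 1920x1080 timing takes the first branch above,
 * since 1280 * 1080 < 1024 * 1920, giving dst.width = 1280 * 1080 /
 * 1024 = 1350 and, via dst.x, pillarbox borders of (1920 - 1350) / 2 =
 * 285 pixels on each side.
 */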
4017
4018 static enum dc_color_depth
4019 convert_color_depth_from_display_info(const struct drm_connector *connector,
4020                                       bool is_y420, int requested_bpc)
4021 {
4022         uint8_t bpc;
4023
4024         if (is_y420) {
4025                 bpc = 8;
4026
4027                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4028                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4029                         bpc = 16;
4030                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4031                         bpc = 12;
4032                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4033                         bpc = 10;
4034         } else {
4035                 bpc = (uint8_t)connector->display_info.bpc;
4036                 /* Assume 8 bpc by default if no bpc is specified. */
4037                 bpc = bpc ? bpc : 8;
4038         }
4039
4040         if (requested_bpc > 0) {
4041                 /*
4042                  * Cap display bpc based on the user requested value.
4043                  *
4044                  * The value for state->max_bpc may not be correctly updated
4045                  * depending on when the connector gets added to the state
4046                  * or if this was called outside of atomic check, so it
4047                  * can't be used directly.
4048                  */
4049                 bpc = min_t(u8, bpc, requested_bpc);
4050
4051                 /* Round down to the nearest even number. */
4052                 bpc = bpc - (bpc & 1);
4053         }
4054
4055         switch (bpc) {
4056         case 0:
4057                 /*
4058                  * Temporary workaround: DRM doesn't parse color depth for
4059                  * EDID revisions before 1.4.
4060                  * TODO: Fix EDID parsing
4061                  */
4062                 return COLOR_DEPTH_888;
4063         case 6:
4064                 return COLOR_DEPTH_666;
4065         case 8:
4066                 return COLOR_DEPTH_888;
4067         case 10:
4068                 return COLOR_DEPTH_101010;
4069         case 12:
4070                 return COLOR_DEPTH_121212;
4071         case 14:
4072                 return COLOR_DEPTH_141414;
4073         case 16:
4074                 return COLOR_DEPTH_161616;
4075         default:
4076                 return COLOR_DEPTH_UNDEFINED;
4077         }
4078 }
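
/*
 * Editor's note (illustrative): `bpc - (bpc & 1)` rounds odd values
 * down to the nearest even depth, so e.g. a requested_bpc of 11 on a
 * 12 bpc panel yields min(12, 11) = 11, rounded down to 10 bpc and
 * thus COLOR_DEPTH_101010.
 */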
4079
4080 static enum dc_aspect_ratio
4081 get_aspect_ratio(const struct drm_display_mode *mode_in)
4082 {
4083         /* 1-1 mapping, since both enums follow the HDMI spec. */
4084         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4085 }
4086
4087 static enum dc_color_space
4088 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4089 {
4090         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4091
4092         switch (dc_crtc_timing->pixel_encoding) {
4093         case PIXEL_ENCODING_YCBCR422:
4094         case PIXEL_ENCODING_YCBCR444:
4095         case PIXEL_ENCODING_YCBCR420:
4096         {
4097                 /*
4098                  * According to the HDMI spec, 27030 kHz is the separation
4099                  * point between HDTV and SDTV; we use YCbCr709 above it
4100                  * and YCbCr601 below it.
4101                  */
4102                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4103                         if (dc_crtc_timing->flags.Y_ONLY)
4104                                 color_space =
4105                                         COLOR_SPACE_YCBCR709_LIMITED;
4106                         else
4107                                 color_space = COLOR_SPACE_YCBCR709;
4108                 } else {
4109                         if (dc_crtc_timing->flags.Y_ONLY)
4110                                 color_space =
4111                                         COLOR_SPACE_YCBCR601_LIMITED;
4112                         else
4113                                 color_space = COLOR_SPACE_YCBCR601;
4114                 }
4115
4116         }
4117         break;
4118         case PIXEL_ENCODING_RGB:
4119                 color_space = COLOR_SPACE_SRGB;
4120                 break;
4121
4122         default:
4123                 WARN_ON(1);
4124                 break;
4125         }
4126
4127         return color_space;
4128 }
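
/*
 * Editor's note (worked example): pix_clk_100hz is in 100 Hz units, so
 * the 270300 threshold above is 27.03 MHz. A 1080p YCbCr stream at
 * 148.5 MHz (pix_clk_100hz = 1485000) takes the YCbCr709 branch, while
 * a 480p stream at 27 MHz (270000) stays on the YCbCr601 side.
 */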
4129
4130 static bool adjust_colour_depth_from_display_info(
4131         struct dc_crtc_timing *timing_out,
4132         const struct drm_display_info *info)
4133 {
4134         enum dc_color_depth depth = timing_out->display_color_depth;
4135         int normalized_clk;
4136         do {
4137                 normalized_clk = timing_out->pix_clk_100hz / 10;
4138                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4139                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4140                         normalized_clk /= 2;
4141                 /* Adjust the pixel clock per the HDMI spec based on colour depth */
4142                 switch (depth) {
4143                 case COLOR_DEPTH_888:
4144                         break;
4145                 case COLOR_DEPTH_101010:
4146                         normalized_clk = (normalized_clk * 30) / 24;
4147                         break;
4148                 case COLOR_DEPTH_121212:
4149                         normalized_clk = (normalized_clk * 36) / 24;
4150                         break;
4151                 case COLOR_DEPTH_161616:
4152                         normalized_clk = (normalized_clk * 48) / 24;
4153                         break;
4154                 default:
4155                         /* The above depths are the only ones valid for HDMI. */
4156                         return false;
4157                 }
4158                 if (normalized_clk <= info->max_tmds_clock) {
4159                         timing_out->display_color_depth = depth;
4160                         return true;
4161                 }
4162         } while (--depth > COLOR_DEPTH_666);
4163         return false;
4164 }
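
/*
 * Editor's note (worked example): pix_clk_100hz / 10 yields kHz, the
 * same unit as info->max_tmds_clock. For a 594 MHz 4:4:4 mode against
 * a 600 MHz TMDS limit, 12 bpc needs 594000 * 36 / 24 = 891000 kHz and
 * 10 bpc needs 594000 * 30 / 24 = 742500 kHz, both over the limit, so
 * the loop settles on 8 bpc at 594000 kHz.
 */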
4165
4166 static void fill_stream_properties_from_drm_display_mode(
4167         struct dc_stream_state *stream,
4168         const struct drm_display_mode *mode_in,
4169         const struct drm_connector *connector,
4170         const struct drm_connector_state *connector_state,
4171         const struct dc_stream_state *old_stream,
4172         int requested_bpc)
4173 {
4174         struct dc_crtc_timing *timing_out = &stream->timing;
4175         const struct drm_display_info *info = &connector->display_info;
4176         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4177         struct hdmi_vendor_infoframe hv_frame;
4178         struct hdmi_avi_infoframe avi_frame;
4179
4180         memset(&hv_frame, 0, sizeof(hv_frame));
4181         memset(&avi_frame, 0, sizeof(avi_frame));
4182
4183         timing_out->h_border_left = 0;
4184         timing_out->h_border_right = 0;
4185         timing_out->v_border_top = 0;
4186         timing_out->v_border_bottom = 0;
4187         /* TODO: un-hardcode */
4188         if (drm_mode_is_420_only(info, mode_in)
4189                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4190                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4191         else if (drm_mode_is_420_also(info, mode_in)
4192                         && aconnector->force_yuv420_output)
4193                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4194         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4195                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4196                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4197         else
4198                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4199
4200         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4201         timing_out->display_color_depth = convert_color_depth_from_display_info(
4202                 connector,
4203                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4204                 requested_bpc);
4205         timing_out->scan_type = SCANNING_TYPE_NODATA;
4206         timing_out->hdmi_vic = 0;
4207
4208         if (old_stream) {
4209                 timing_out->vic = old_stream->timing.vic;
4210                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4211                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4212         } else {
4213                 timing_out->vic = drm_match_cea_mode(mode_in);
4214                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4215                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4216                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4217                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4218         }
4219
4220         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4221                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4222                 timing_out->vic = avi_frame.video_code;
4223                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4224                 timing_out->hdmi_vic = hv_frame.vic;
4225         }
4226
4227         timing_out->h_addressable = mode_in->crtc_hdisplay;
4228         timing_out->h_total = mode_in->crtc_htotal;
4229         timing_out->h_sync_width =
4230                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4231         timing_out->h_front_porch =
4232                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4233         timing_out->v_total = mode_in->crtc_vtotal;
4234         timing_out->v_addressable = mode_in->crtc_vdisplay;
4235         timing_out->v_front_porch =
4236                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4237         timing_out->v_sync_width =
4238                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4239         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4240         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4241
4242         stream->output_color_space = get_output_color_space(timing_out);
4243
4244         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4245         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4246         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4247                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4248                     drm_mode_is_420_also(info, mode_in) &&
4249                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4250                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4251                         adjust_colour_depth_from_display_info(timing_out, info);
4252                 }
4253         }
4254 }
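/*
 * Editorial example (not part of the driver): for the CEA 1080p60 mode
 * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200, clock
 * 148500 kHz), the conversions above yield
 *
 *     h_front_porch = 2008 - 1920 = 88
 *     h_sync_width  = 2052 - 2008 = 44
 *     pix_clk_100hz = 148500 * 10 = 1485000   (i.e. 148.5 MHz)
 */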
4255
4256 static void fill_audio_info(struct audio_info *audio_info,
4257                             const struct drm_connector *drm_connector,
4258                             const struct dc_sink *dc_sink)
4259 {
4260         int i = 0;
4261         int cea_revision = 0;
4262         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4263
4264         audio_info->manufacture_id = edid_caps->manufacturer_id;
4265         audio_info->product_id = edid_caps->product_id;
4266
4267         cea_revision = drm_connector->display_info.cea_rev;
4268
4269         strscpy(audio_info->display_name,
4270                 edid_caps->display_name,
4271                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4272
4273         if (cea_revision >= 3) {
4274                 audio_info->mode_count = edid_caps->audio_mode_count;
4275
4276                 for (i = 0; i < audio_info->mode_count; ++i) {
4277                         audio_info->modes[i].format_code =
4278                                         (enum audio_format_code)
4279                                         (edid_caps->audio_modes[i].format_code);
4280                         audio_info->modes[i].channel_count =
4281                                         edid_caps->audio_modes[i].channel_count;
4282                         audio_info->modes[i].sample_rates.all =
4283                                         edid_caps->audio_modes[i].sample_rate;
4284                         audio_info->modes[i].sample_size =
4285                                         edid_caps->audio_modes[i].sample_size;
4286                 }
4287         }
4288
4289         audio_info->flags.all = edid_caps->speaker_flags;
4290
4291         /* TODO: We only check progressive mode; check interlaced mode too */
4292         if (drm_connector->latency_present[0]) {
4293                 audio_info->video_latency = drm_connector->video_latency[0];
4294                 audio_info->audio_latency = drm_connector->audio_latency[0];
4295         }
4296
4297         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4298
4299 }
4300
4301 static void
4302 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4303                                       struct drm_display_mode *dst_mode)
4304 {
4305         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4306         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4307         dst_mode->crtc_clock = src_mode->crtc_clock;
4308         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4309         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4310         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4311         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4312         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4313         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4314         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4315         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4316         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4317         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4318         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4319 }
4320
4321 static void
4322 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4323                                         const struct drm_display_mode *native_mode,
4324                                         bool scale_enabled)
4325 {
4326         if (scale_enabled) {
4327                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4328         } else if (native_mode->clock == drm_mode->clock &&
4329                         native_mode->htotal == drm_mode->htotal &&
4330                         native_mode->vtotal == drm_mode->vtotal) {
4331                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4332         } else {
4333                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
4334         }
4335 }
4336
4337 static struct dc_sink *
4338 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4339 {
4340         struct dc_sink_init_data sink_init_data = { 0 };
4341         struct dc_sink *sink = NULL;
4342         sink_init_data.link = aconnector->dc_link;
4343         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4344
4345         sink = dc_sink_create(&sink_init_data);
4346         if (!sink) {
4347                 DRM_ERROR("Failed to create sink!\n");
4348                 return NULL;
4349         }
4350         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4351
4352         return sink;
4353 }
4354
4355 static void set_multisync_trigger_params(
4356                 struct dc_stream_state *stream)
4357 {
4358         if (stream->triggered_crtc_reset.enabled) {
4359                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4360                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4361         }
4362 }
4363
4364 static void set_master_stream(struct dc_stream_state *stream_set[],
4365                               int stream_count)
4366 {
4367         int j, highest_rfr = 0, master_stream = 0;
4368
4369         for (j = 0;  j < stream_count; j++) {
4370                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4371                         int refresh_rate = 0;
4372
4373                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4374                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4375                         if (refresh_rate > highest_rfr) {
4376                                 highest_rfr = refresh_rate;
4377                                 master_stream = j;
4378                         }
4379                 }
4380         }
4381         for (j = 0;  j < stream_count; j++) {
4382                 if (stream_set[j])
4383                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4384         }
4385 }
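/*
 * Editorial example: with the 1080p60 timing above (pix_clk_100hz 1485000,
 * h_total 2200, v_total 1125), set_master_stream() computes
 *
 *     (1485000 * 100) / (2200 * 1125) = 60
 *
 * so a 1080p60 stream wins mastership over any lower-refresh stream.
 */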
4386
4387 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4388 {
4389         int i = 0;
4390
4391         if (context->stream_count < 2)
4392                 return;
4393         for (i = 0; i < context->stream_count ; i++) {
4394                 if (!context->streams[i])
4395                         continue;
4396                 /*
4397                  * TODO: add a function to read AMD VSDB bits and set
4398                  * crtc_sync_master.multi_sync_enabled flag
4399                  * For now it's set to false
4400                  */
4401                 set_multisync_trigger_params(context->streams[i]);
4402         }
4403         set_master_stream(context->streams, context->stream_count);
4404 }
4405
4406 static struct dc_stream_state *
4407 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4408                        const struct drm_display_mode *drm_mode,
4409                        const struct dm_connector_state *dm_state,
4410                        const struct dc_stream_state *old_stream,
4411                        int requested_bpc)
4412 {
4413         struct drm_display_mode *preferred_mode = NULL;
4414         struct drm_connector *drm_connector;
4415         const struct drm_connector_state *con_state =
4416                 dm_state ? &dm_state->base : NULL;
4417         struct dc_stream_state *stream = NULL;
4418         struct drm_display_mode mode = *drm_mode;
4419         bool native_mode_found = false;
4420         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4421         int mode_refresh;
4422         int preferred_refresh = 0;
4423 #if defined(CONFIG_DRM_AMD_DC_DCN)
4424         struct dsc_dec_dpcd_caps dsc_caps;
4425 #endif
4426         uint32_t link_bandwidth_kbps;
4427         struct dc_sink *sink = NULL;
4428
4429         if (aconnector == NULL) {
4430                 DRM_ERROR("aconnector is NULL!\n");
4431                 return stream;
4432         }
4433
4434         drm_connector = &aconnector->base;
4435
4436         if (!aconnector->dc_sink) {
4437                 sink = create_fake_sink(aconnector);
4438                 if (!sink)
4439                         return stream;
4440         } else {
4441                 sink = aconnector->dc_sink;
4442                 dc_sink_retain(sink);
4443         }
4444
4445         stream = dc_create_stream_for_sink(sink);
4446
4447         if (stream == NULL) {
4448                 DRM_ERROR("Failed to create stream for sink!\n");
4449                 goto finish;
4450         }
4451
4452         stream->dm_stream_context = aconnector;
4453
4454         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4455                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4456
4457         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4458                 /* Search for preferred mode */
4459                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4460                         native_mode_found = true;
4461                         break;
4462                 }
4463         }
4464         if (!native_mode_found)
4465                 preferred_mode = list_first_entry_or_null(
4466                                 &aconnector->base.modes,
4467                                 struct drm_display_mode,
4468                                 head);
4469
4470         mode_refresh = drm_mode_vrefresh(&mode);
4471
4472         if (preferred_mode == NULL) {
4473                 /*
4474                  * This may not be an error: the use case is a hotplug with no
4475                  * usermode call to reset and set the mode. In that case we set
4476                  * the mode ourselves to restore the previous one, and the mode
4477                  * list may not be filled in yet.
4478                  */
4479                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4480         } else {
4481                 decide_crtc_timing_for_drm_display_mode(
4482                                 &mode, preferred_mode,
4483                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4484                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4485         }
4486
4487         if (!dm_state)
4488                 drm_mode_set_crtcinfo(&mode, 0);
4489
4490         /*
4491          * If scaling is enabled and the refresh rate didn't change,
4492          * copy the VIC and sync polarities from the old timing.
4493          */
4494         if (!scale || mode_refresh != preferred_refresh)
4495                 fill_stream_properties_from_drm_display_mode(stream,
4496                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4497         else
4498                 fill_stream_properties_from_drm_display_mode(stream,
4499                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4500
4501         stream->timing.flags.DSC = 0;
4502
4503         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4504 #if defined(CONFIG_DRM_AMD_DC_DCN)
4505                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4506                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4507                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4508                                       &dsc_caps);
4509 #endif
4510                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4511                                                              dc_link_get_link_cap(aconnector->dc_link));
4512
4513 #if defined(CONFIG_DRM_AMD_DC_DCN)
4514                 if (dsc_caps.is_dsc_supported)
4515                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4516                                                   &dsc_caps,
4517                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4518                                                   link_bandwidth_kbps,
4519                                                   &stream->timing,
4520                                                   &stream->timing.dsc_cfg))
4521                                 stream->timing.flags.DSC = 1;
4522 #endif
4523         }
4524
4525         update_stream_scaling_settings(&mode, dm_state, stream);
4526
4527         fill_audio_info(
4528                 &stream->audio_info,
4529                 drm_connector,
4530                 sink);
4531
4532         update_stream_signal(stream, sink);
4533
4534         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4535                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4536         if (stream->link->psr_settings.psr_feature_enabled) {
4537                 struct dc *core_dc = stream->link->ctx->dc;
4538
4539                 if (dc_is_dmcu_initialized(core_dc)) {
4540                         /*
4541                          * Decide whether the stream supports VSC SDP
4542                          * colorimetry before building the VSC infopacket.
4543                          */
4544                         stream->use_vsc_sdp_for_colorimetry = false;
4545                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4546                                 stream->use_vsc_sdp_for_colorimetry =
4547                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4548                         } else {
4549                                 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4550                                         stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4551                                         stream->use_vsc_sdp_for_colorimetry = true;
4552                                 }
4553                         }
4554                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4555                 }
4556         }
4557 finish:
4558         dc_sink_release(sink);
4559
4560         return stream;
4561 }
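/*
 * Editorial note: DSC is only considered above for SIGNAL_TYPE_DISPLAY_PORT
 * sinks. stream->timing.flags.DSC starts at 0 and is set to 1 only when the
 * sink's DPCD advertises DSC support and dc_dsc_compute_config() can fit
 * the timing into the reported link bandwidth; otherwise the stream is
 * carried uncompressed.
 */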
4562
4563 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4564 {
4565         drm_crtc_cleanup(crtc);
4566         kfree(crtc);
4567 }
4568
4569 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4570                                   struct drm_crtc_state *state)
4571 {
4572         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4573
4574         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4575         if (cur->stream)
4576                 dc_stream_release(cur->stream);
4577
4578
4579         __drm_atomic_helper_crtc_destroy_state(state);
4580
4581
4582         kfree(state);
4583 }
4584
4585 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4586 {
4587         struct dm_crtc_state *state;
4588
4589         if (crtc->state)
4590                 dm_crtc_destroy_state(crtc, crtc->state);
4591
4592         state = kzalloc(sizeof(*state), GFP_KERNEL);
4593         if (WARN_ON(!state))
4594                 return;
4595
4596         crtc->state = &state->base;
4597         crtc->state->crtc = crtc;
4598
4599 }
4600
4601 static struct drm_crtc_state *
4602 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4603 {
4604         struct dm_crtc_state *state, *cur;
4605
4606         if (WARN_ON(!crtc->state))
4607                 return NULL;
4608
4609         cur = to_dm_crtc_state(crtc->state);
4610
4611         state = kzalloc(sizeof(*state), GFP_KERNEL);
4612         if (!state)
4613                 return NULL;
4614
4615         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4616
4617         if (cur->stream) {
4618                 state->stream = cur->stream;
4619                 dc_stream_retain(state->stream);
4620         }
4621
4622         state->active_planes = cur->active_planes;
4623         state->interrupts_enabled = cur->interrupts_enabled;
4624         state->vrr_params = cur->vrr_params;
4625         state->vrr_infopacket = cur->vrr_infopacket;
4626         state->abm_level = cur->abm_level;
4627         state->vrr_supported = cur->vrr_supported;
4628         state->freesync_config = cur->freesync_config;
4629         state->crc_src = cur->crc_src;
4630         state->cm_has_degamma = cur->cm_has_degamma;
4631         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4632
4633         /* TODO: Duplicate dc_stream once the stream object is flattened */
4634
4635         return &state->base;
4636 }
4637
4638 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4639 {
4640         enum dc_irq_source irq_source;
4641         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4642         struct amdgpu_device *adev = crtc->dev->dev_private;
4643         int rc;
4644
4645         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4646
4647         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4648
4649         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4650                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4651         return rc;
4652 }
4653
4654 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4655 {
4656         enum dc_irq_source irq_source;
4657         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4658         struct amdgpu_device *adev = crtc->dev->dev_private;
4659         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4660         int rc = 0;
4661
4662         if (enable) {
4663                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4664                 if (amdgpu_dm_vrr_active(acrtc_state))
4665                         rc = dm_set_vupdate_irq(crtc, true);
4666         } else {
4667                 /* vblank irq off -> vupdate irq off */
4668                 rc = dm_set_vupdate_irq(crtc, false);
4669         }
4670
4671         if (rc)
4672                 return rc;
4673
4674         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4675         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4676 }
4677
4678 static int dm_enable_vblank(struct drm_crtc *crtc)
4679 {
4680         return dm_set_vblank(crtc, true);
4681 }
4682
4683 static void dm_disable_vblank(struct drm_crtc *crtc)
4684 {
4685         dm_set_vblank(crtc, false);
4686 }
4687
4688 /* Implement only the options currently available for the driver */
4689 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4690         .reset = dm_crtc_reset_state,
4691         .destroy = amdgpu_dm_crtc_destroy,
4692         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4693         .set_config = drm_atomic_helper_set_config,
4694         .page_flip = drm_atomic_helper_page_flip,
4695         .atomic_duplicate_state = dm_crtc_duplicate_state,
4696         .atomic_destroy_state = dm_crtc_destroy_state,
4697         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4698         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4699         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4700         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4701         .enable_vblank = dm_enable_vblank,
4702         .disable_vblank = dm_disable_vblank,
4703         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4704 };
4705
4706 static enum drm_connector_status
4707 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4708 {
4709         bool connected;
4710         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4711
4712         /*
4713          * Notes:
4714          * 1. This interface is NOT called in context of HPD irq.
4715          * 2. This interface *is called* in context of user-mode ioctl. Which
4716          * makes it a bad place for *any* MST-related activity.
4717          */
4718
4719         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4720             !aconnector->fake_enable)
4721                 connected = (aconnector->dc_sink != NULL);
4722         else
4723                 connected = (aconnector->base.force == DRM_FORCE_ON);
4724
4725         return (connected ? connector_status_connected :
4726                         connector_status_disconnected);
4727 }
4728
4729 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4730                                             struct drm_connector_state *connector_state,
4731                                             struct drm_property *property,
4732                                             uint64_t val)
4733 {
4734         struct drm_device *dev = connector->dev;
4735         struct amdgpu_device *adev = dev->dev_private;
4736         struct dm_connector_state *dm_old_state =
4737                 to_dm_connector_state(connector->state);
4738         struct dm_connector_state *dm_new_state =
4739                 to_dm_connector_state(connector_state);
4740
4741         int ret = -EINVAL;
4742
4743         if (property == dev->mode_config.scaling_mode_property) {
4744                 enum amdgpu_rmx_type rmx_type;
4745
4746                 switch (val) {
4747                 case DRM_MODE_SCALE_CENTER:
4748                         rmx_type = RMX_CENTER;
4749                         break;
4750                 case DRM_MODE_SCALE_ASPECT:
4751                         rmx_type = RMX_ASPECT;
4752                         break;
4753                 case DRM_MODE_SCALE_FULLSCREEN:
4754                         rmx_type = RMX_FULL;
4755                         break;
4756                 case DRM_MODE_SCALE_NONE:
4757                 default:
4758                         rmx_type = RMX_OFF;
4759                         break;
4760                 }
4761
4762                 if (dm_old_state->scaling == rmx_type)
4763                         return 0;
4764
4765                 dm_new_state->scaling = rmx_type;
4766                 ret = 0;
4767         } else if (property == adev->mode_info.underscan_hborder_property) {
4768                 dm_new_state->underscan_hborder = val;
4769                 ret = 0;
4770         } else if (property == adev->mode_info.underscan_vborder_property) {
4771                 dm_new_state->underscan_vborder = val;
4772                 ret = 0;
4773         } else if (property == adev->mode_info.underscan_property) {
4774                 dm_new_state->underscan_enable = val;
4775                 ret = 0;
4776         } else if (property == adev->mode_info.abm_level_property) {
4777                 dm_new_state->abm_level = val;
4778                 ret = 0;
4779         }
4780
4781         return ret;
4782 }
4783
4784 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4785                                             const struct drm_connector_state *state,
4786                                             struct drm_property *property,
4787                                             uint64_t *val)
4788 {
4789         struct drm_device *dev = connector->dev;
4790         struct amdgpu_device *adev = dev->dev_private;
4791         struct dm_connector_state *dm_state =
4792                 to_dm_connector_state(state);
4793         int ret = -EINVAL;
4794
4795         if (property == dev->mode_config.scaling_mode_property) {
4796                 switch (dm_state->scaling) {
4797                 case RMX_CENTER:
4798                         *val = DRM_MODE_SCALE_CENTER;
4799                         break;
4800                 case RMX_ASPECT:
4801                         *val = DRM_MODE_SCALE_ASPECT;
4802                         break;
4803                 case RMX_FULL:
4804                         *val = DRM_MODE_SCALE_FULLSCREEN;
4805                         break;
4806                 case RMX_OFF:
4807                 default:
4808                         *val = DRM_MODE_SCALE_NONE;
4809                         break;
4810                 }
4811                 ret = 0;
4812         } else if (property == adev->mode_info.underscan_hborder_property) {
4813                 *val = dm_state->underscan_hborder;
4814                 ret = 0;
4815         } else if (property == adev->mode_info.underscan_vborder_property) {
4816                 *val = dm_state->underscan_vborder;
4817                 ret = 0;
4818         } else if (property == adev->mode_info.underscan_property) {
4819                 *val = dm_state->underscan_enable;
4820                 ret = 0;
4821         } else if (property == adev->mode_info.abm_level_property) {
4822                 *val = dm_state->abm_level;
4823                 ret = 0;
4824         }
4825
4826         return ret;
4827 }
4828
4829 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4830 {
4831         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4832
4833         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4834 }
4835
4836 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4837 {
4838         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4839         const struct dc_link *link = aconnector->dc_link;
4840         struct amdgpu_device *adev = connector->dev->dev_private;
4841         struct amdgpu_display_manager *dm = &adev->dm;
4842
4843 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4844         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4845
4846         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4847             link->type != dc_connection_none &&
4848             dm->backlight_dev) {
4849                 backlight_device_unregister(dm->backlight_dev);
4850                 dm->backlight_dev = NULL;
4851         }
4852 #endif
4853
4854         if (aconnector->dc_em_sink)
4855                 dc_sink_release(aconnector->dc_em_sink);
4856         aconnector->dc_em_sink = NULL;
4857         if (aconnector->dc_sink)
4858                 dc_sink_release(aconnector->dc_sink);
4859         aconnector->dc_sink = NULL;
4860
4861         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4862         drm_connector_unregister(connector);
4863         drm_connector_cleanup(connector);
4864         if (aconnector->i2c) {
4865                 i2c_del_adapter(&aconnector->i2c->base);
4866                 kfree(aconnector->i2c);
4867         }
4868         kfree(aconnector->dm_dp_aux.aux.name);
4869
4870         kfree(connector);
4871 }
4872
4873 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4874 {
4875         struct dm_connector_state *state =
4876                 to_dm_connector_state(connector->state);
4877
4878         if (connector->state)
4879                 __drm_atomic_helper_connector_destroy_state(connector->state);
4880
4881         kfree(state);
4882
4883         state = kzalloc(sizeof(*state), GFP_KERNEL);
4884
4885         if (state) {
4886                 state->scaling = RMX_OFF;
4887                 state->underscan_enable = false;
4888                 state->underscan_hborder = 0;
4889                 state->underscan_vborder = 0;
4890                 state->base.max_requested_bpc = 8;
4891                 state->vcpi_slots = 0;
4892                 state->pbn = 0;
4893                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4894                         state->abm_level = amdgpu_dm_abm_level;
4895
4896                 __drm_atomic_helper_connector_reset(connector, &state->base);
4897         }
4898 }
4899
4900 struct drm_connector_state *
4901 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4902 {
4903         struct dm_connector_state *state =
4904                 to_dm_connector_state(connector->state);
4905
4906         struct dm_connector_state *new_state =
4907                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4908
4909         if (!new_state)
4910                 return NULL;
4911
4912         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4913
4914         new_state->freesync_capable = state->freesync_capable;
4915         new_state->abm_level = state->abm_level;
4916         new_state->scaling = state->scaling;
4917         new_state->underscan_enable = state->underscan_enable;
4918         new_state->underscan_hborder = state->underscan_hborder;
4919         new_state->underscan_vborder = state->underscan_vborder;
4920         new_state->vcpi_slots = state->vcpi_slots;
4921         new_state->pbn = state->pbn;
4922         return &new_state->base;
4923 }
4924
4925 static int
4926 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4927 {
4928         struct amdgpu_dm_connector *amdgpu_dm_connector =
4929                 to_amdgpu_dm_connector(connector);
4930         int r;
4931
4932         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4933             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4934                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4935                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4936                 if (r)
4937                         return r;
4938         }
4939
4940 #if defined(CONFIG_DEBUG_FS)
4941         connector_debugfs_init(amdgpu_dm_connector);
4942 #endif
4943
4944         return 0;
4945 }
4946
4947 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4948         .reset = amdgpu_dm_connector_funcs_reset,
4949         .detect = amdgpu_dm_connector_detect,
4950         .fill_modes = drm_helper_probe_single_connector_modes,
4951         .destroy = amdgpu_dm_connector_destroy,
4952         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4953         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4954         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4955         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4956         .late_register = amdgpu_dm_connector_late_register,
4957         .early_unregister = amdgpu_dm_connector_unregister
4958 };
4959
4960 static int get_modes(struct drm_connector *connector)
4961 {
4962         return amdgpu_dm_connector_get_modes(connector);
4963 }
4964
4965 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4966 {
4967         struct dc_sink_init_data init_params = {
4968                         .link = aconnector->dc_link,
4969                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4970         };
4971         struct edid *edid;
4972
4973         if (!aconnector->base.edid_blob_ptr) {
4974                 DRM_ERROR("No EDID firmware found on connector %s, forcing to OFF!\n",
4975                                 aconnector->base.name);
4976
4977                 aconnector->base.force = DRM_FORCE_OFF;
4978                 aconnector->base.override_edid = false;
4979                 return;
4980         }
4981
4982         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4983
4984         aconnector->edid = edid;
4985
4986         aconnector->dc_em_sink = dc_link_add_remote_sink(
4987                 aconnector->dc_link,
4988                 (uint8_t *)edid,
4989                 (edid->extensions + 1) * EDID_LENGTH,
4990                 &init_params);
4991
4992         if (aconnector->base.force == DRM_FORCE_ON) {
4993                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4994                 aconnector->dc_link->local_sink :
4995                 aconnector->dc_em_sink;
4996                 dc_sink_retain(aconnector->dc_sink);
4997         }
4998 }
4999
5000 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5001 {
5002         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5003
5004         /*
5005          * For a headless boot with a DP-managed connector forced on,
5006          * these settings have to be != 0 to get an initial modeset.
5007          */
5008         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5009                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5010                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5011         }
5012
5013
5014         aconnector->base.override_edid = true;
5015         create_eml_sink(aconnector);
5016 }
5017
5018 static struct dc_stream_state *
5019 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5020                                 const struct drm_display_mode *drm_mode,
5021                                 const struct dm_connector_state *dm_state,
5022                                 const struct dc_stream_state *old_stream)
5023 {
5024         struct drm_connector *connector = &aconnector->base;
5025         struct amdgpu_device *adev = connector->dev->dev_private;
5026         struct dc_stream_state *stream;
5027         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5028         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5029         enum dc_status dc_result = DC_OK;
5030
5031         do {
5032                 stream = create_stream_for_sink(aconnector, drm_mode,
5033                                                 dm_state, old_stream,
5034                                                 requested_bpc);
5035                 if (stream == NULL) {
5036                         DRM_ERROR("Failed to create stream for sink!\n");
5037                         break;
5038                 }
5039
5040                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5041
5042                 if (dc_result != DC_OK) {
5043                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
5044                                       drm_mode->hdisplay,
5045                                       drm_mode->vdisplay,
5046                                       drm_mode->clock,
5047                                       dc_result);
5048
5049                         dc_stream_release(stream);
5050                         stream = NULL;
5051                         requested_bpc -= 2; /* lower bpc to retry validation */
5052                 }
5053
5054         } while (stream == NULL && requested_bpc >= 6);
5055
5056         return stream;
5057 }
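/*
 * Editorial sketch (try_validate_at is a hypothetical helper name): the
 * retry loop above walks the colour depth down two bpc at a time, so with
 * max_requested_bpc of 10 the attempts are
 *
 *     for (bpc = 10; bpc >= 6; bpc -= 2)
 *             if (try_validate_at(bpc))        /* 10 -> 8 -> 6 */
 *                     break;
 *
 * Only when even 6 bpc fails DC validation does the caller see NULL and
 * report the mode as invalid.
 */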
5058
5059 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5060                                    struct drm_display_mode *mode)
5061 {
5062         int result = MODE_ERROR;
5063         struct dc_sink *dc_sink;
5064         /* TODO: Unhardcode stream count */
5065         struct dc_stream_state *stream;
5066         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5067
5068         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5069                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5070                 return result;
5071
5072         /*
5073          * Only run this the first time mode_valid is called, to initialize
5074          * EDID management.
5075          */
5076         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5077                 !aconnector->dc_em_sink)
5078                 handle_edid_mgmt(aconnector);
5079
5080         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5081
5082         if (dc_sink == NULL) {
5083                 DRM_ERROR("dc_sink is NULL!\n");
5084                 goto fail;
5085         }
5086
5087         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5088         if (stream) {
5089                 dc_stream_release(stream);
5090                 result = MODE_OK;
5091         }
5092
5093 fail:
5094         /* TODO: error handling */
5095         return result;
5096 }
5097
5098 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5099                                 struct dc_info_packet *out)
5100 {
5101         struct hdmi_drm_infoframe frame;
5102         unsigned char buf[30]; /* 26 + 4 */
5103         ssize_t len;
5104         int ret, i;
5105
5106         memset(out, 0, sizeof(*out));
5107
5108         if (!state->hdr_output_metadata)
5109                 return 0;
5110
5111         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5112         if (ret)
5113                 return ret;
5114
5115         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5116         if (len < 0)
5117                 return (int)len;
5118
5119         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5120         if (len != 30)
5121                 return -EINVAL;
5122
5123         /* Prepare the infopacket for DC. */
5124         switch (state->connector->connector_type) {
5125         case DRM_MODE_CONNECTOR_HDMIA:
5126                 out->hb0 = 0x87; /* type */
5127                 out->hb1 = 0x01; /* version */
5128                 out->hb2 = 0x1A; /* length */
5129                 out->sb[0] = buf[3]; /* checksum */
5130                 i = 1;
5131                 break;
5132
5133         case DRM_MODE_CONNECTOR_DisplayPort:
5134         case DRM_MODE_CONNECTOR_eDP:
5135                 out->hb0 = 0x00; /* sdp id, zero */
5136                 out->hb1 = 0x87; /* type */
5137                 out->hb2 = 0x1D; /* payload len - 1 */
5138                 out->hb3 = (0x13 << 2); /* sdp version */
5139                 out->sb[0] = 0x01; /* version */
5140                 out->sb[1] = 0x1A; /* length */
5141                 i = 2;
5142                 break;
5143
5144         default:
5145                 return -EINVAL;
5146         }
5147
5148         memcpy(&out->sb[i], &buf[4], 26);
5149         out->valid = true;
5150
5151         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5152                        sizeof(out->sb), false);
5153
5154         return 0;
5155 }
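/*
 * Editorial note: hdmi_drm_infoframe_pack_only() emits a 4-byte infoframe
 * header followed by the 26-byte Dynamic Range and Mastering payload,
 * hence the fixed length check of 30 above. For HDMI, sb[0] reuses buf[3]
 * (the infoframe checksum) and buf[4..29] lands in sb[1..26]; for DP the
 * payload starts at sb[2], behind the SDP version and length bytes.
 */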
5156
5157 static bool
5158 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5159                           const struct drm_connector_state *new_state)
5160 {
5161         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5162         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5163
5164         if (old_blob != new_blob) {
5165                 if (old_blob && new_blob &&
5166                     old_blob->length == new_blob->length)
5167                         return memcmp(old_blob->data, new_blob->data,
5168                                       old_blob->length);
5169
5170                 return true;
5171         }
5172
5173         return false;
5174 }
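/*
 * Editorial note: the memcmp() result is implicitly converted to bool, so
 * two equal-length blobs compare as "different" iff any payload byte
 * differs; a NULL -> blob or blob -> NULL transition (and any length
 * change) always reports true.
 */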
5175
5176 static int
5177 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5178                                  struct drm_atomic_state *state)
5179 {
5180         struct drm_connector_state *new_con_state =
5181                 drm_atomic_get_new_connector_state(state, conn);
5182         struct drm_connector_state *old_con_state =
5183                 drm_atomic_get_old_connector_state(state, conn);
5184         struct drm_crtc *crtc = new_con_state->crtc;
5185         struct drm_crtc_state *new_crtc_state;
5186         int ret;
5187
5188         if (!crtc)
5189                 return 0;
5190
5191         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5192                 struct dc_info_packet hdr_infopacket;
5193
5194                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5195                 if (ret)
5196                         return ret;
5197
5198                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5199                 if (IS_ERR(new_crtc_state))
5200                         return PTR_ERR(new_crtc_state);
5201
5202                 /*
5203                  * DC considers the stream backends changed if the
5204                  * static metadata changes. Forcing the modeset also
5205                  * gives a simple way for userspace to switch from
5206                  * 8bpc to 10bpc when setting the metadata to enter
5207                  * or exit HDR.
5208                  *
5209                  * Changing the static metadata after it's been
5210                  * set is permissible, however. So only force a
5211                  * modeset if we're entering or exiting HDR.
5212                  */
5213                 new_crtc_state->mode_changed =
5214                         !old_con_state->hdr_output_metadata ||
5215                         !new_con_state->hdr_output_metadata;
5216         }
5217
5218         return 0;
5219 }
5220
5221 static const struct drm_connector_helper_funcs
5222 amdgpu_dm_connector_helper_funcs = {
5223         /*
5224          * When hotplugging a second, larger display in fbcon mode, the
5225          * higher resolution modes are filtered out by drm_mode_validate_size()
5226          * and go missing after the user starts lightdm. So we need to rebuild
5227          * the mode list in the get_modes callback, not just return the count.
5228          */
5229         .get_modes = get_modes,
5230         .mode_valid = amdgpu_dm_connector_mode_valid,
5231         .atomic_check = amdgpu_dm_connector_atomic_check,
5232 };
5233
5234 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5235 {
5236 }
5237
5238 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5239 {
5240         struct drm_device *dev = new_crtc_state->crtc->dev;
5241         struct drm_plane *plane;
5242
5243         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5244                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5245                         return true;
5246         }
5247
5248         return false;
5249 }
5250
5251 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5252 {
5253         struct drm_atomic_state *state = new_crtc_state->state;
5254         struct drm_plane *plane;
5255         int num_active = 0;
5256
5257         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5258                 struct drm_plane_state *new_plane_state;
5259
5260                 /* Cursor planes are "fake". */
5261                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5262                         continue;
5263
5264                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5265
5266                 if (!new_plane_state) {
5267                         /*
5268                          * The plane is enabled on the CRTC and hasn't changed
5269                          * state. This means that it previously passed
5270                          * validation and is therefore enabled.
5271                          */
5272                         num_active += 1;
5273                         continue;
5274                 }
5275
5276                 /* We need a framebuffer to be considered enabled. */
5277                 num_active += (new_plane_state->fb != NULL);
5278         }
5279
5280         return num_active;
5281 }
5282
5283 /*
5284  * Sets whether interrupts should be enabled on a specific CRTC.
5285  * We require that the stream be enabled and that there exist active
5286  * DC planes on the stream.
5287  */
5288 static void
5289 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5290                                struct drm_crtc_state *new_crtc_state)
5291 {
5292         struct dm_crtc_state *dm_new_crtc_state =
5293                 to_dm_crtc_state(new_crtc_state);
5294
5295         dm_new_crtc_state->active_planes = 0;
5296         dm_new_crtc_state->interrupts_enabled = false;
5297
5298         if (!dm_new_crtc_state->stream)
5299                 return;
5300
5301         dm_new_crtc_state->active_planes =
5302                 count_crtc_active_planes(new_crtc_state);
5303
5304         dm_new_crtc_state->interrupts_enabled =
5305                 dm_new_crtc_state->active_planes > 0;
5306 }
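/*
 * Editorial example: a CRTC with a stream and one bound primary plane ends
 * up with active_planes == 1 and interrupts_enabled == true; dropping the
 * last non-cursor plane (or the stream) returns both to 0/false, which is
 * how the driver avoids enabling interrupts for fully blanked CRTCs.
 */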
5307
5308 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5309                                        struct drm_crtc_state *state)
5310 {
5311         struct amdgpu_device *adev = crtc->dev->dev_private;
5312         struct dc *dc = adev->dm.dc;
5313         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5314         int ret = -EINVAL;
5315
5316         /*
5317          * Update interrupt state for the CRTC. This needs to happen whenever
5318          * the CRTC has changed or whenever any of its planes have changed.
5319          * Atomic check satisfies both of these requirements since the CRTC
5320          * is added to the state by DRM during drm_atomic_helper_check_planes.
5321          */
5322         dm_update_crtc_interrupt_state(crtc, state);
5323
5324         if (unlikely(!dm_crtc_state->stream &&
5325                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5326                 WARN_ON(1);
5327                 return ret;
5328         }
5329
5330         /* In some use cases, like reset, no stream is attached */
5331         if (!dm_crtc_state->stream)
5332                 return 0;
5333
5334         /*
5335          * We want at least one hardware plane enabled to use
5336          * the stream with a cursor enabled.
5337          */
5338         if (state->enable && state->active &&
5339             does_crtc_have_active_cursor(state) &&
5340             dm_crtc_state->active_planes == 0)
5341                 return -EINVAL;
5342
5343         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5344                 return 0;
5345
5346         return ret;
5347 }
5348
5349 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5350                                       const struct drm_display_mode *mode,
5351                                       struct drm_display_mode *adjusted_mode)
5352 {
5353         return true;
5354 }
5355
5356 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5357         .disable = dm_crtc_helper_disable,
5358         .atomic_check = dm_crtc_helper_atomic_check,
5359         .mode_fixup = dm_crtc_helper_mode_fixup,
5360         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5361 };
5362
5363 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5364 {
5365
5366 }
5367
5368 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5369 {
5370         switch (display_color_depth) {
5371         case COLOR_DEPTH_666:
5372                 return 6;
5373         case COLOR_DEPTH_888:
5374                 return 8;
5375         case COLOR_DEPTH_101010:
5376                 return 10;
5377         case COLOR_DEPTH_121212:
5378                 return 12;
5379         case COLOR_DEPTH_141414:
5380                 return 14;
5381         case COLOR_DEPTH_161616:
5382                 return 16;
5383         default:
5384                 break;
5385         }
5386         return 0;
5387 }
5388
5389 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5390                                           struct drm_crtc_state *crtc_state,
5391                                           struct drm_connector_state *conn_state)
5392 {
5393         struct drm_atomic_state *state = crtc_state->state;
5394         struct drm_connector *connector = conn_state->connector;
5395         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5396         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5397         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5398         struct drm_dp_mst_topology_mgr *mst_mgr;
5399         struct drm_dp_mst_port *mst_port;
5400         enum dc_color_depth color_depth;
5401         int clock, bpp = 0;
5402         bool is_y420 = false;
5403
5404         if (!aconnector->port || !aconnector->dc_sink)
5405                 return 0;
5406
5407         mst_port = aconnector->port;
5408         mst_mgr = &aconnector->mst_port->mst_mgr;
5409
5410         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5411                 return 0;
5412
5413         if (!state->duplicated) {
5414                 int max_bpc = conn_state->max_requested_bpc;
5415                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5416                                 aconnector->force_yuv420_output;
5417                 color_depth = convert_color_depth_from_display_info(connector,
5418                                                                     is_y420,
5419                                                                     max_bpc);
5420                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5421                 clock = adjusted_mode->clock;
5422                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5423         }
5424         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5425                                                                            mst_mgr,
5426                                                                            mst_port,
5427                                                                            dm_new_connector_state->pbn,
5428                                                                            0);
5429         if (dm_new_connector_state->vcpi_slots < 0) {
5430                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5431                 return dm_new_connector_state->vcpi_slots;
5432         }
5433         return 0;
5434 }
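/*
 * Editorial example: for an 8-bpc MST sink, bpp = 8 * 3 = 24. A 1080p60
 * adjusted-mode clock of 148500 kHz is then fed to drm_dp_calc_pbn_mode()
 * to derive the PBN value that drm_dp_atomic_find_vcpi_slots() converts
 * into a timeslot count on the MST link.
 */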
5435
5436 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5437         .disable = dm_encoder_helper_disable,
5438         .atomic_check = dm_encoder_helper_atomic_check
5439 };
5440
5441 #if defined(CONFIG_DRM_AMD_DC_DCN)
5442 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5443                                             struct dc_state *dc_state)
5444 {
5445         struct dc_stream_state *stream = NULL;
5446         struct drm_connector *connector;
5447         struct drm_connector_state *new_con_state, *old_con_state;
5448         struct amdgpu_dm_connector *aconnector;
5449         struct dm_connector_state *dm_conn_state;
5450         int i, j, clock, bpp;
5451         int vcpi, pbn_div, pbn = 0;
5452
5453         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5454
5455                 aconnector = to_amdgpu_dm_connector(connector);
5456
5457                 if (!aconnector->port)
5458                         continue;
5459
5460                 if (!new_con_state || !new_con_state->crtc)
5461                         continue;
5462
5463                 dm_conn_state = to_dm_connector_state(new_con_state);
5464
5465                 for (j = 0; j < dc_state->stream_count; j++) {
5466                         stream = dc_state->streams[j];
5467                         if (!stream)
5468                                 continue;
5469
5470                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5471                                 break;
5472
5473                         stream = NULL;
5474                 }
5475
5476                 if (!stream)
5477                         continue;
5478
5479                 if (stream->timing.flags.DSC != 1) {
5480                         drm_dp_mst_atomic_enable_dsc(state,
5481                                                      aconnector->port,
5482                                                      dm_conn_state->pbn,
5483                                                      0,
5484                                                      false);
5485                         continue;
5486                 }
5487
5488                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5489                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5490                 clock = stream->timing.pix_clk_100hz / 10;
5491                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5492                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5493                                                     aconnector->port,
5494                                                     pbn, pbn_div,
5495                                                     true);
5496                 if (vcpi < 0)
5497                         return vcpi;
5498
5499                 dm_conn_state->pbn = pbn;
5500                 dm_conn_state->vcpi_slots = vcpi;
5501         }
5502         return 0;
5503 }
5504 #endif
5505
5506 static void dm_drm_plane_reset(struct drm_plane *plane)
5507 {
5508         struct dm_plane_state *amdgpu_state = NULL;
5509
5510         if (plane->state)
5511                 plane->funcs->atomic_destroy_state(plane, plane->state);
5512
5513         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5514         WARN_ON(amdgpu_state == NULL);
5515
5516         if (amdgpu_state)
5517                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5518 }
5519
5520 static struct drm_plane_state *
5521 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5522 {
5523         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5524
5525         old_dm_plane_state = to_dm_plane_state(plane->state);
5526         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5527         if (!dm_plane_state)
5528                 return NULL;
5529
5530         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5531
5532         if (old_dm_plane_state->dc_state) {
5533                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5534                 dc_plane_state_retain(dm_plane_state->dc_state);
5535         }
5536
5537         return &dm_plane_state->base;
5538 }
5539
5540 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5541                                 struct drm_plane_state *state)
5542 {
5543         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5544
5545         if (dm_plane_state->dc_state)
5546                 dc_plane_state_release(dm_plane_state->dc_state);
5547
5548         drm_atomic_helper_plane_destroy_state(plane, state);
5549 }
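/*
 * Note: the dc_plane_state_retain() taken in dm_drm_plane_duplicate_state()
 * is balanced by the dc_plane_state_release() above, so every duplicated
 * plane state holds its own reference to the underlying dc_plane_state.
 */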
5550
5551 static const struct drm_plane_funcs dm_plane_funcs = {
5552         .update_plane   = drm_atomic_helper_update_plane,
5553         .disable_plane  = drm_atomic_helper_disable_plane,
5554         .destroy        = drm_primary_helper_destroy,
5555         .reset = dm_drm_plane_reset,
5556         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5557         .atomic_destroy_state = dm_drm_plane_destroy_state,
5558 };
5559
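/*
 * prepare_fb follows the usual TTM pinning pattern: reserve the BO, pin it
 * into a scanout-capable domain, bind it into GART so it has a GPU address,
 * then back off the reservation. Cursor BOs are restricted to VRAM below,
 * presumably because the cursor hardware cannot scan out of GTT.
 */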
5560 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5561                                       struct drm_plane_state *new_state)
5562 {
5563         struct amdgpu_framebuffer *afb;
5564         struct drm_gem_object *obj;
5565         struct amdgpu_device *adev;
5566         struct amdgpu_bo *rbo;
5567         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5568         struct list_head list;
5569         struct ttm_validate_buffer tv;
5570         struct ww_acquire_ctx ticket;
5571         uint64_t tiling_flags;
5572         uint32_t domain;
5573         int r;
5574         bool tmz_surface = false;
5575         bool force_disable_dcc = false;
5576
5577         dm_plane_state_old = to_dm_plane_state(plane->state);
5578         dm_plane_state_new = to_dm_plane_state(new_state);
5579
5580         if (!new_state->fb) {
5581                 DRM_DEBUG_DRIVER("No FB bound\n");
5582                 return 0;
5583         }
5584
5585         afb = to_amdgpu_framebuffer(new_state->fb);
5586         obj = new_state->fb->obj[0];
5587         rbo = gem_to_amdgpu_bo(obj);
5588         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5589         INIT_LIST_HEAD(&list);
5590
5591         tv.bo = &rbo->tbo;
5592         tv.num_shared = 1;
5593         list_add(&tv.head, &list);
5594
5595         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5596         if (r) {
5597                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5598                 return r;
5599         }
5600
5601         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5602                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5603         else
5604                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5605
5606         r = amdgpu_bo_pin(rbo, domain);
5607         if (unlikely(r != 0)) {
5608                 if (r != -ERESTARTSYS)
5609                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5610                 ttm_eu_backoff_reservation(&ticket, &list);
5611                 return r;
5612         }
5613
5614         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5615         if (unlikely(r != 0)) {
5616                 amdgpu_bo_unpin(rbo);
5617                 ttm_eu_backoff_reservation(&ticket, &list);
5618                 DRM_ERROR("%p GART bind failed (%d)\n", rbo, r);
5619                 return r;
5620         }
5621
5622         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5623
5624         tmz_surface = amdgpu_bo_encrypted(rbo);
5625
5626         ttm_eu_backoff_reservation(&ticket, &list);
5627
5628         afb->address = amdgpu_bo_gpu_offset(rbo);
5629
5630         amdgpu_bo_ref(rbo);
5631
5632         if (dm_plane_state_new->dc_state &&
5633                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5634                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5635
5636                 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5637                 fill_plane_buffer_attributes(
5638                         adev, afb, plane_state->format, plane_state->rotation,
5639                         tiling_flags, &plane_state->tiling_info,
5640                         &plane_state->plane_size, &plane_state->dcc,
5641                         &plane_state->address, tmz_surface,
5642                         force_disable_dcc);
5643         }
5644
5645         return 0;
5646 }
5647
5648 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5649                                        struct drm_plane_state *old_state)
5650 {
5651         struct amdgpu_bo *rbo;
5652         int r;
5653
5654         if (!old_state->fb)
5655                 return;
5656
5657         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5658         r = amdgpu_bo_reserve(rbo, false);
5659         if (unlikely(r)) {
5660                 DRM_ERROR("failed to reserve rbo before unpin\n");
5661                 return;
5662         }
5663
5664         amdgpu_bo_unpin(rbo);
5665         amdgpu_bo_unreserve(rbo);
5666         amdgpu_bo_unref(&rbo);
5667 }
5668
5669 static int dm_plane_atomic_check(struct drm_plane *plane,
5670                                  struct drm_plane_state *state)
5671 {
5672         struct amdgpu_device *adev = plane->dev->dev_private;
5673         struct dc *dc = adev->dm.dc;
5674         struct dm_plane_state *dm_plane_state;
5675         struct dc_scaling_info scaling_info;
5676         int ret;
5677
5678         dm_plane_state = to_dm_plane_state(state);
5679
5680         if (!dm_plane_state->dc_state)
5681                 return 0;
5682
5683         ret = fill_dc_scaling_info(state, &scaling_info);
5684         if (ret)
5685                 return ret;
5686
5687         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5688                 return 0;
5689
5690         return -EINVAL;
5691 }
5692
5693 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5694                                        struct drm_plane_state *new_plane_state)
5695 {
5696         /* Only support async updates on cursor planes. */
5697         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5698                 return -EINVAL;
5699
5700         return 0;
5701 }
5702
5703 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5704                                          struct drm_plane_state *new_state)
5705 {
5706         struct drm_plane_state *old_state =
5707                 drm_atomic_get_old_plane_state(new_state->state, plane);
5708
5709         swap(plane->state->fb, new_state->fb);
5710
5711         plane->state->src_x = new_state->src_x;
5712         plane->state->src_y = new_state->src_y;
5713         plane->state->src_w = new_state->src_w;
5714         plane->state->src_h = new_state->src_h;
5715         plane->state->crtc_x = new_state->crtc_x;
5716         plane->state->crtc_y = new_state->crtc_y;
5717         plane->state->crtc_w = new_state->crtc_w;
5718         plane->state->crtc_h = new_state->crtc_h;
5719
5720         handle_cursor_update(plane, old_state);
5721 }
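/*
 * Async (cursor) updates bypass the normal atomic state swap, which is why
 * the src/crtc rectangle fields are copied into the committed plane state
 * by hand above before handle_cursor_update() programs the hardware.
 */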
5722
5723 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5724         .prepare_fb = dm_plane_helper_prepare_fb,
5725         .cleanup_fb = dm_plane_helper_cleanup_fb,
5726         .atomic_check = dm_plane_atomic_check,
5727         .atomic_async_check = dm_plane_atomic_async_check,
5728         .atomic_async_update = dm_plane_atomic_async_update
5729 };
5730
5731 /*
5732  * TODO: these are currently initialized to RGB formats only.
5733  * For future use cases we should either initialize them dynamically based on
5734  * plane capabilities, or initialize this array to all formats so the internal
5735  * drm check will succeed, and let DC implement the proper check.
5736  */
5737 static const uint32_t rgb_formats[] = {
5738         DRM_FORMAT_XRGB8888,
5739         DRM_FORMAT_ARGB8888,
5740         DRM_FORMAT_RGBA8888,
5741         DRM_FORMAT_XRGB2101010,
5742         DRM_FORMAT_XBGR2101010,
5743         DRM_FORMAT_ARGB2101010,
5744         DRM_FORMAT_ABGR2101010,
5745         DRM_FORMAT_XBGR8888,
5746         DRM_FORMAT_ABGR8888,
5747         DRM_FORMAT_RGB565,
5748 };
5749
5750 static const uint32_t overlay_formats[] = {
5751         DRM_FORMAT_XRGB8888,
5752         DRM_FORMAT_ARGB8888,
5753         DRM_FORMAT_RGBA8888,
5754         DRM_FORMAT_XBGR8888,
5755         DRM_FORMAT_ABGR8888,
5756         DRM_FORMAT_RGB565
5757 };
5758
5759 static const u32 cursor_formats[] = {
5760         DRM_FORMAT_ARGB8888
5761 };
5762
5763 static int get_plane_formats(const struct drm_plane *plane,
5764                              const struct dc_plane_cap *plane_cap,
5765                              uint32_t *formats, int max_formats)
5766 {
5767         int i, num_formats = 0;
5768
5769         /*
5770          * TODO: Query support for each group of formats directly from
5771          * DC plane caps. This will require adding more formats to the
5772          * caps list.
5773          */
5774
5775         switch (plane->type) {
5776         case DRM_PLANE_TYPE_PRIMARY:
5777                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5778                         if (num_formats >= max_formats)
5779                                 break;
5780
5781                         formats[num_formats++] = rgb_formats[i];
5782                 }
5783
5784                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5785                         formats[num_formats++] = DRM_FORMAT_NV12;
5786                 if (plane_cap && plane_cap->pixel_format_support.p010)
5787                         formats[num_formats++] = DRM_FORMAT_P010;
5788                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5789                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5790                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5791                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5792                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5793                 }
5794                 break;
5795
5796         case DRM_PLANE_TYPE_OVERLAY:
5797                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5798                         if (num_formats >= max_formats)
5799                                 break;
5800
5801                         formats[num_formats++] = overlay_formats[i];
5802                 }
5803                 break;
5804
5805         case DRM_PLANE_TYPE_CURSOR:
5806                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5807                         if (num_formats >= max_formats)
5808                                 break;
5809
5810                         formats[num_formats++] = cursor_formats[i];
5811                 }
5812                 break;
5813         }
5814
5815         return num_formats;
5816 }
5817
5818 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5819                                 struct drm_plane *plane,
5820                                 unsigned long possible_crtcs,
5821                                 const struct dc_plane_cap *plane_cap)
5822 {
5823         uint32_t formats[32];
5824         int num_formats;
5825         int res = -EPERM;
5826
5827         num_formats = get_plane_formats(plane, plane_cap, formats,
5828                                         ARRAY_SIZE(formats));
5829
5830         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5831                                        &dm_plane_funcs, formats, num_formats,
5832                                        NULL, plane->type, NULL);
5833         if (res)
5834                 return res;
5835
5836         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5837             plane_cap && plane_cap->per_pixel_alpha) {
5838                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5839                                           BIT(DRM_MODE_BLEND_PREMULTI);
5840
5841                 drm_plane_create_alpha_property(plane);
5842                 drm_plane_create_blend_mode_property(plane, blend_caps);
5843         }
5844
5845         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5846             plane_cap &&
5847             (plane_cap->pixel_format_support.nv12 ||
5848              plane_cap->pixel_format_support.p010)) {
5849                 /* This only affects YUV formats. */
5850                 drm_plane_create_color_properties(
5851                         plane,
5852                         BIT(DRM_COLOR_YCBCR_BT601) |
5853                         BIT(DRM_COLOR_YCBCR_BT709) |
5854                         BIT(DRM_COLOR_YCBCR_BT2020),
5855                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5856                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5857                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5858         }
5859
5860         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5861
5862         /* Create (reset) the plane state */
5863         if (plane->funcs->reset)
5864                 plane->funcs->reset(plane);
5865
5866         return 0;
5867 }
5868
5869 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5870                                struct drm_plane *plane,
5871                                uint32_t crtc_index)
5872 {
5873         struct amdgpu_crtc *acrtc = NULL;
5874         struct drm_plane *cursor_plane;
5875
5876         int res = -ENOMEM;
5877
5878         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5879         if (!cursor_plane)
5880                 goto fail;
5881
5882         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5883         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5884
5885         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5886         if (!acrtc)
5887                 goto fail;
5888
5889         res = drm_crtc_init_with_planes(
5890                         dm->ddev,
5891                         &acrtc->base,
5892                         plane,
5893                         cursor_plane,
5894                         &amdgpu_dm_crtc_funcs, NULL);
5895
5896         if (res)
5897                 goto fail;
5898
5899         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5900
5901         /* Create (reset) the crtc state */
5902         if (acrtc->base.funcs->reset)
5903                 acrtc->base.funcs->reset(&acrtc->base);
5904
5905         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5906         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5907
5908         acrtc->crtc_id = crtc_index;
5909         acrtc->base.enabled = false;
5910         acrtc->otg_inst = -1;
5911
5912         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5913         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5914                                    true, MAX_COLOR_LUT_ENTRIES);
5915         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5916
5917         return 0;
5918
5919 fail:
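        /*
         * kfree(NULL) is a no-op, so both frees below are safe regardless of
         * which allocation (if any) failed on the paths that jump here.
         */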
5920         kfree(acrtc);
5921         kfree(cursor_plane);
5922         return res;
5923 }
5924
5925
5926 static int to_drm_connector_type(enum signal_type st)
5927 {
5928         switch (st) {
5929         case SIGNAL_TYPE_HDMI_TYPE_A:
5930                 return DRM_MODE_CONNECTOR_HDMIA;
5931         case SIGNAL_TYPE_EDP:
5932                 return DRM_MODE_CONNECTOR_eDP;
5933         case SIGNAL_TYPE_LVDS:
5934                 return DRM_MODE_CONNECTOR_LVDS;
5935         case SIGNAL_TYPE_RGB:
5936                 return DRM_MODE_CONNECTOR_VGA;
5937         case SIGNAL_TYPE_DISPLAY_PORT:
5938         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5939                 return DRM_MODE_CONNECTOR_DisplayPort;
5940         case SIGNAL_TYPE_DVI_DUAL_LINK:
5941         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5942                 return DRM_MODE_CONNECTOR_DVID;
5943         case SIGNAL_TYPE_VIRTUAL:
5944                 return DRM_MODE_CONNECTOR_VIRTUAL;
5945
5946         default:
5947                 return DRM_MODE_CONNECTOR_Unknown;
5948         }
5949 }
5950
5951 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5952 {
5953         struct drm_encoder *encoder;
5954
5955         /* There is only one encoder per connector */
5956         drm_connector_for_each_possible_encoder(connector, encoder)
5957                 return encoder;
5958
5959         return NULL;
5960 }
5961
5962 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5963 {
5964         struct drm_encoder *encoder;
5965         struct amdgpu_encoder *amdgpu_encoder;
5966
5967         encoder = amdgpu_dm_connector_to_encoder(connector);
5968
5969         if (encoder == NULL)
5970                 return;
5971
5972         amdgpu_encoder = to_amdgpu_encoder(encoder);
5973
5974         amdgpu_encoder->native_mode.clock = 0;
5975
5976         if (!list_empty(&connector->probed_modes)) {
5977                 struct drm_display_mode *preferred_mode = NULL;
5978
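                /* Only the first probed mode is examined; after sorting it
                 * is expected to be the preferred one. */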
5979                 list_for_each_entry(preferred_mode,
5980                                     &connector->probed_modes,
5981                                     head) {
5982                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5983                                 amdgpu_encoder->native_mode = *preferred_mode;
5984
5985                         break;
5986                 }
5987
5988         }
5989 }
5990
5991 static struct drm_display_mode *
5992 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5993                              char *name,
5994                              int hdisplay, int vdisplay)
5995 {
5996         struct drm_device *dev = encoder->dev;
5997         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5998         struct drm_display_mode *mode = NULL;
5999         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6000
6001         mode = drm_mode_duplicate(dev, native_mode);
6002
6003         if (mode == NULL)
6004                 return NULL;
6005
6006         mode->hdisplay = hdisplay;
6007         mode->vdisplay = vdisplay;
6008         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6009         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6010
6011         return mode;
6012
6013 }
6014
6015 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6016                                                  struct drm_connector *connector)
6017 {
6018         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6019         struct drm_display_mode *mode = NULL;
6020         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6021         struct amdgpu_dm_connector *amdgpu_dm_connector =
6022                                 to_amdgpu_dm_connector(connector);
6023         int i;
6024         int n;
6025         struct mode_size {
6026                 char name[DRM_DISPLAY_MODE_LEN];
6027                 int w;
6028                 int h;
6029         } common_modes[] = {
6030                 {  "640x480",  640,  480},
6031                 {  "800x600",  800,  600},
6032                 { "1024x768", 1024,  768},
6033                 { "1280x720", 1280,  720},
6034                 { "1280x800", 1280,  800},
6035                 {"1280x1024", 1280, 1024},
6036                 { "1440x900", 1440,  900},
6037                 {"1680x1050", 1680, 1050},
6038                 {"1600x1200", 1600, 1200},
6039                 {"1920x1080", 1920, 1080},
6040                 {"1920x1200", 1920, 1200}
6041         };
6042
6043         n = ARRAY_SIZE(common_modes);
6044
6045         for (i = 0; i < n; i++) {
6046                 struct drm_display_mode *curmode = NULL;
6047                 bool mode_existed = false;
6048
6049                 if (common_modes[i].w > native_mode->hdisplay ||
6050                     common_modes[i].h > native_mode->vdisplay ||
6051                    (common_modes[i].w == native_mode->hdisplay &&
6052                     common_modes[i].h == native_mode->vdisplay))
6053                         continue;
6054
6055                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6056                         if (common_modes[i].w == curmode->hdisplay &&
6057                             common_modes[i].h == curmode->vdisplay) {
6058                                 mode_existed = true;
6059                                 break;
6060                         }
6061                 }
6062
6063                 if (mode_existed)
6064                         continue;
6065
6066                 mode = amdgpu_dm_create_common_mode(encoder,
6067                                 common_modes[i].name, common_modes[i].w,
6068                                 common_modes[i].h);
6069                 drm_mode_probed_add(connector, mode);
6070                 amdgpu_dm_connector->num_modes++;
6071         }
6072 }
6073
6074 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6075                                               struct edid *edid)
6076 {
6077         struct amdgpu_dm_connector *amdgpu_dm_connector =
6078                         to_amdgpu_dm_connector(connector);
6079
6080         if (edid) {
6081                 /* empty probed_modes */
6082                 INIT_LIST_HEAD(&connector->probed_modes);
6083                 amdgpu_dm_connector->num_modes =
6084                                 drm_add_edid_modes(connector, edid);
6085
6086                 /* Sort the probed modes before calling
6087                  * amdgpu_dm_get_native_mode(), since an EDID can contain
6088                  * more than one preferred mode. Modes later in the probed
6089                  * list may have a higher preferred resolution, e.g. a
6090                  * 3840x2160 preferred timing in the base EDID and a
6091                  * 4096x2160 preferred resolution in a later DID
6092                  * extension block.
6093                  */
6094                 drm_mode_sort(&connector->probed_modes);
6095                 amdgpu_dm_get_native_mode(connector);
6096         } else {
6097                 amdgpu_dm_connector->num_modes = 0;
6098         }
6099 }
6100
6101 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6102 {
6103         struct amdgpu_dm_connector *amdgpu_dm_connector =
6104                         to_amdgpu_dm_connector(connector);
6105         struct drm_encoder *encoder;
6106         struct edid *edid = amdgpu_dm_connector->edid;
6107
6108         encoder = amdgpu_dm_connector_to_encoder(connector);
6109
6110         if (!edid || !drm_edid_is_valid(edid)) {
6111                 amdgpu_dm_connector->num_modes =
6112                                 drm_add_modes_noedid(connector, 640, 480);
6113         } else {
6114                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6115                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6116         }
6117         amdgpu_dm_fbc_init(connector);
6118
6119         return amdgpu_dm_connector->num_modes;
6120 }
6121
6122 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6123                                      struct amdgpu_dm_connector *aconnector,
6124                                      int connector_type,
6125                                      struct dc_link *link,
6126                                      int link_index)
6127 {
6128         struct amdgpu_device *adev = dm->ddev->dev_private;
6129
6130         /*
6131          * Some of the properties below require access to state, like bpc.
6132          * Allocate some default initial connector state with our reset helper.
6133          */
6134         if (aconnector->base.funcs->reset)
6135                 aconnector->base.funcs->reset(&aconnector->base);
6136
6137         aconnector->connector_id = link_index;
6138         aconnector->dc_link = link;
6139         aconnector->base.interlace_allowed = false;
6140         aconnector->base.doublescan_allowed = false;
6141         aconnector->base.stereo_allowed = false;
6142         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6143         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6144         aconnector->audio_inst = -1;
6145         mutex_init(&aconnector->hpd_lock);
6146
6147         /*
6148          * Configure HPD hot plug support. connector->polled defaults to 0,
6149          * which means HPD hot plug is not supported.
6150          */
6151         switch (connector_type) {
6152         case DRM_MODE_CONNECTOR_HDMIA:
6153                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6154                 aconnector->base.ycbcr_420_allowed =
6155                         link->link_enc->features.hdmi_ycbcr420_supported;
6156                 break;
6157         case DRM_MODE_CONNECTOR_DisplayPort:
6158                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6159                 aconnector->base.ycbcr_420_allowed =
6160                         link->link_enc->features.dp_ycbcr420_supported;
6161                 break;
6162         case DRM_MODE_CONNECTOR_DVID:
6163                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6164                 break;
6165         default:
6166                 break;
6167         }
6168
6169         drm_object_attach_property(&aconnector->base.base,
6170                                 dm->ddev->mode_config.scaling_mode_property,
6171                                 DRM_MODE_SCALE_NONE);
6172
6173         drm_object_attach_property(&aconnector->base.base,
6174                                 adev->mode_info.underscan_property,
6175                                 UNDERSCAN_OFF);
6176         drm_object_attach_property(&aconnector->base.base,
6177                                 adev->mode_info.underscan_hborder_property,
6178                                 0);
6179         drm_object_attach_property(&aconnector->base.base,
6180                                 adev->mode_info.underscan_vborder_property,
6181                                 0);
6182
6183         if (!aconnector->mst_port)
6184                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6185
6186         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6187         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6188         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6189
6190         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6191             dc_is_dmcu_initialized(adev->dm.dc)) {
6192                 drm_object_attach_property(&aconnector->base.base,
6193                                 adev->mode_info.abm_level_property, 0);
6194         }
6195
6196         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6197             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6198             connector_type == DRM_MODE_CONNECTOR_eDP) {
6199                 drm_object_attach_property(
6200                         &aconnector->base.base,
6201                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6202
6203                 if (!aconnector->mst_port)
6204                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6205
6206 #ifdef CONFIG_DRM_AMD_DC_HDCP
6207                 if (adev->dm.hdcp_workqueue)
6208                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6209 #endif
6210         }
6211 }
6212
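/*
 * Each i2c_msg is translated 1:1 into a dc i2c_payload and the whole set is
 * handed to DC as one command. A typical EDID block read, for example,
 * arrives as two messages (a one-byte offset write to address 0x50 followed
 * by a 128-byte read) and becomes a single two-payload transaction.
 */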
6213 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6214                               struct i2c_msg *msgs, int num)
6215 {
6216         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6217         struct ddc_service *ddc_service = i2c->ddc_service;
6218         struct i2c_command cmd;
6219         int i;
6220         int result = -EIO;
6221
6222         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6223
6224         if (!cmd.payloads)
6225                 return result;
6226
6227         cmd.number_of_payloads = num;
6228         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6229         cmd.speed = 100;
6230
6231         for (i = 0; i < num; i++) {
6232                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6233                 cmd.payloads[i].address = msgs[i].addr;
6234                 cmd.payloads[i].length = msgs[i].len;
6235                 cmd.payloads[i].data = msgs[i].buf;
6236         }
6237
6238         if (dc_submit_i2c(
6239                         ddc_service->ctx->dc,
6240                         ddc_service->ddc_pin->hw_info.ddc_channel,
6241                         &cmd))
6242                 result = num;
6243
6244         kfree(cmd.payloads);
6245         return result;
6246 }
6247
6248 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6249 {
6250         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6251 }
6252
6253 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6254         .master_xfer = amdgpu_dm_i2c_xfer,
6255         .functionality = amdgpu_dm_i2c_func,
6256 };
6257
6258 static struct amdgpu_i2c_adapter *
6259 create_i2c(struct ddc_service *ddc_service,
6260            int link_index,
6261            int *res)
6262 {
6263         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6264         struct amdgpu_i2c_adapter *i2c;
6265
6266         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6267         if (!i2c)
6268                 return NULL;
6269         i2c->base.owner = THIS_MODULE;
6270         i2c->base.class = I2C_CLASS_DDC;
6271         i2c->base.dev.parent = &adev->pdev->dev;
6272         i2c->base.algo = &amdgpu_dm_i2c_algo;
6273         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6274         i2c_set_adapdata(&i2c->base, i2c);
6275         i2c->ddc_service = ddc_service;
6276         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6277
6278         return i2c;
6279 }
6280
6281
6282 /*
6283  * Note: this function assumes that dc_link_detect() was called for the
6284  * dc_link which will be represented by this aconnector.
6285  */
6286 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6287                                     struct amdgpu_dm_connector *aconnector,
6288                                     uint32_t link_index,
6289                                     struct amdgpu_encoder *aencoder)
6290 {
6291         int res = 0;
6292         int connector_type;
6293         struct dc *dc = dm->dc;
6294         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6295         struct amdgpu_i2c_adapter *i2c;
6296
6297         link->priv = aconnector;
6298
6299         DRM_DEBUG_DRIVER("%s()\n", __func__);
6300
6301         i2c = create_i2c(link->ddc, link->link_index, &res);
6302         if (!i2c) {
6303                 DRM_ERROR("Failed to create i2c adapter data\n");
6304                 return -ENOMEM;
6305         }
6306
6307         aconnector->i2c = i2c;
6308         res = i2c_add_adapter(&i2c->base);
6309
6310         if (res) {
6311                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6312                 goto out_free;
6313         }
6314
6315         connector_type = to_drm_connector_type(link->connector_signal);
6316
6317         res = drm_connector_init_with_ddc(
6318                         dm->ddev,
6319                         &aconnector->base,
6320                         &amdgpu_dm_connector_funcs,
6321                         connector_type,
6322                         &i2c->base);
6323
6324         if (res) {
6325                 DRM_ERROR("connector_init failed\n");
6326                 aconnector->connector_id = -1;
6327                 goto out_free;
6328         }
6329
6330         drm_connector_helper_add(
6331                         &aconnector->base,
6332                         &amdgpu_dm_connector_helper_funcs);
6333
6334         amdgpu_dm_connector_init_helper(
6335                 dm,
6336                 aconnector,
6337                 connector_type,
6338                 link,
6339                 link_index);
6340
6341         drm_connector_attach_encoder(
6342                 &aconnector->base, &aencoder->base);
6343
6344         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6345                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6346                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6347
6348 out_free:
6349         if (res) {
6350                 kfree(i2c);
6351                 aconnector->i2c = NULL;
6352         }
6353         return res;
6354 }
6355
6356 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6357 {
6358         switch (adev->mode_info.num_crtc) {
6359         case 1:
6360                 return 0x1;
6361         case 2:
6362                 return 0x3;
6363         case 3:
6364                 return 0x7;
6365         case 4:
6366                 return 0xf;
6367         case 5:
6368                 return 0x1f;
6369         case 6:
6370         default:
6371                 return 0x3f;
6372         }
6373 }
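/*
 * Equivalently, the mask is (1 << num_crtc) - 1, capped at 6 CRTCs: e.g.
 * num_crtc == 4 yields 0xf, allowing the encoder on CRTCs 0-3.
 */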
6374
6375 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6376                                   struct amdgpu_encoder *aencoder,
6377                                   uint32_t link_index)
6378 {
6379         struct amdgpu_device *adev = dev->dev_private;
6380
6381         int res = drm_encoder_init(dev,
6382                                    &aencoder->base,
6383                                    &amdgpu_dm_encoder_funcs,
6384                                    DRM_MODE_ENCODER_TMDS,
6385                                    NULL);
6386
6387         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6388
6389         if (!res)
6390                 aencoder->encoder_id = link_index;
6391         else
6392                 aencoder->encoder_id = -1;
6393
6394         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6395
6396         return res;
6397 }
6398
6399 static void manage_dm_interrupts(struct amdgpu_device *adev,
6400                                  struct amdgpu_crtc *acrtc,
6401                                  bool enable)
6402 {
6403         /*
6404          * This is not a correct translation, but it works as long as the
6405          * VBLANK irq constant is the same as the PFLIP one.
6406          */
6407         int irq_type =
6408                 amdgpu_display_crtc_idx_to_irq_type(
6409                         adev,
6410                         acrtc->crtc_id);
6411
6412         if (enable) {
6413                 drm_crtc_vblank_on(&acrtc->base);
6414                 amdgpu_irq_get(
6415                         adev,
6416                         &adev->pageflip_irq,
6417                         irq_type);
6418         } else {
6419
6420                 amdgpu_irq_put(
6421                         adev,
6422                         &adev->pageflip_irq,
6423                         irq_type);
6424                 drm_crtc_vblank_off(&acrtc->base);
6425         }
6426 }
6427
6428 static bool
6429 is_scaling_state_different(const struct dm_connector_state *dm_state,
6430                            const struct dm_connector_state *old_dm_state)
6431 {
6432         if (dm_state->scaling != old_dm_state->scaling)
6433                 return true;
6434         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6435                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6436                         return true;
6437         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6438                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6439                         return true;
6440         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6441                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6442                 return true;
6443         return false;
6444 }
6445
6446 #ifdef CONFIG_DRM_AMD_DC_HDCP
6447 static bool is_content_protection_different(struct drm_connector_state *state,
6448                                             const struct drm_connector_state *old_state,
6449                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6450 {
6451         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6452
6453         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6454             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6455                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6456                 return true;
6457         }
6458
6459         /* CP is being re-enabled; ignore this. */
6460         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6461             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6462                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6463                 return false;
6464         }
6465
6466         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED. */
6467         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6468             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6469                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6470
6471         /* Check that something is connected/enabled; otherwise we would start
6472          * HDCP with nothing connected/enabled (hot-plug, headless S3, dpms).
6473          */
6474         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6475             aconnector->dc_sink != NULL)
6476                 return true;
6477
6478         if (old_state->content_protection == state->content_protection)
6479                 return false;
6480
6481         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6482                 return true;
6483
6484         return false;
6485 }
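/*
 * To summarize the transitions above: a content-type change forces a
 * renegotiation, DESIRED with a live sink enables HDCP, UNDESIRED disables
 * it, ENABLED -> DESIRED is treated as a re-enable in flight and ignored,
 * and the S3 restore case (UNDESIRED -> ENABLED) is downgraded to DESIRED
 * so HDCP is renegotiated rather than assumed active.
 */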
6486
6487 #endif
6488 static void remove_stream(struct amdgpu_device *adev,
6489                           struct amdgpu_crtc *acrtc,
6490                           struct dc_stream_state *stream)
6491 {
6492         /* This is the update-mode case: only reset the CRTC's stream bookkeeping. */
6493
6494         acrtc->otg_inst = -1;
6495         acrtc->enabled = false;
6496 }
6497
6498 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6499                                struct dc_cursor_position *position)
6500 {
6501         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6502         int x, y;
6503         int xorigin = 0, yorigin = 0;
6504
6505         position->enable = false;
6506         position->x = 0;
6507         position->y = 0;
6508
6509         if (!crtc || !plane->state->fb)
6510                 return 0;
6511
6512         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6513             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6514                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6515                           __func__,
6516                           plane->state->crtc_w,
6517                           plane->state->crtc_h);
6518                 return -EINVAL;
6519         }
6520
6521         x = plane->state->crtc_x;
6522         y = plane->state->crtc_y;
6523
6524         if (x <= -amdgpu_crtc->max_cursor_width ||
6525             y <= -amdgpu_crtc->max_cursor_height)
6526                 return 0;
6527
6528         if (x < 0) {
6529                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6530                 x = 0;
6531         }
6532         if (y < 0) {
6533                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6534                 y = 0;
6535         }
6536         position->enable = true;
6537         position->translate_by_source = true;
6538         position->x = x;
6539         position->y = y;
6540         position->x_hotspot = xorigin;
6541         position->y_hotspot = yorigin;
6542
6543         return 0;
6544 }
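/*
 * For illustration: with max_cursor_width == 64, a cursor at crtc_x == -10
 * is clamped above to x == 0 with x_hotspot == 10, so the bitmap is shifted
 * by 10 pixels and its visible portion stays aligned with the framebuffer.
 */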
6545
6546 static void handle_cursor_update(struct drm_plane *plane,
6547                                  struct drm_plane_state *old_plane_state)
6548 {
6549         struct amdgpu_device *adev = plane->dev->dev_private;
6550         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6551         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6552         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6553         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6554         uint64_t address = afb ? afb->address : 0;
6555         struct dc_cursor_position position;
6556         struct dc_cursor_attributes attributes;
6557         int ret;
6558
6559         if (!plane->state->fb && !old_plane_state->fb)
6560                 return;
6561
6562         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6563                          __func__,
6564                          amdgpu_crtc->crtc_id,
6565                          plane->state->crtc_w,
6566                          plane->state->crtc_h);
6567
6568         ret = get_cursor_position(plane, crtc, &position);
6569         if (ret)
6570                 return;
6571
6572         if (!position.enable) {
6573                 /* turn off cursor */
6574                 if (crtc_state && crtc_state->stream) {
6575                         mutex_lock(&adev->dm.dc_lock);
6576                         dc_stream_set_cursor_position(crtc_state->stream,
6577                                                       &position);
6578                         mutex_unlock(&adev->dm.dc_lock);
6579                 }
6580                 return;
6581         }
6582
6583         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6584         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6585
6586         memset(&attributes, 0, sizeof(attributes));
6587         attributes.address.high_part = upper_32_bits(address);
6588         attributes.address.low_part  = lower_32_bits(address);
6589         attributes.width             = plane->state->crtc_w;
6590         attributes.height            = plane->state->crtc_h;
6591         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6592         attributes.rotation_angle    = 0;
6593         attributes.attribute_flags.value = 0;
6594
6595         attributes.pitch = attributes.width;
6596
6597         if (crtc_state->stream) {
6598                 mutex_lock(&adev->dm.dc_lock);
6599                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6600                                                          &attributes))
6601                         DRM_ERROR("DC failed to set cursor attributes\n");
6602
6603                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6604                                                    &position))
6605                         DRM_ERROR("DC failed to set cursor position\n");
6606                 mutex_unlock(&adev->dm.dc_lock);
6607         }
6608 }
6609
6610 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6611 {
6612
6613         assert_spin_locked(&acrtc->base.dev->event_lock);
6614         WARN_ON(acrtc->event);
6615
6616         acrtc->event = acrtc->base.state->event;
6617
6618         /* Set the flip status */
6619         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6620
6621         /* Mark this event as consumed */
6622         acrtc->base.state->event = NULL;
6623
6624         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6625                                                  acrtc->crtc_id);
6626 }
6627
6628 static void update_freesync_state_on_stream(
6629         struct amdgpu_display_manager *dm,
6630         struct dm_crtc_state *new_crtc_state,
6631         struct dc_stream_state *new_stream,
6632         struct dc_plane_state *surface,
6633         u32 flip_timestamp_in_us)
6634 {
6635         struct mod_vrr_params vrr_params;
6636         struct dc_info_packet vrr_infopacket = {0};
6637         struct amdgpu_device *adev = dm->adev;
6638         unsigned long flags;
6639
6640         if (!new_stream)
6641                 return;
6642
6643         /*
6644          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6645          * For now it's sufficient to just guard against these conditions.
6646          */
6647
6648         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6649                 return;
6650
6651         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6652         vrr_params = new_crtc_state->vrr_params;
6653
6654         if (surface) {
6655                 mod_freesync_handle_preflip(
6656                         dm->freesync_module,
6657                         surface,
6658                         new_stream,
6659                         flip_timestamp_in_us,
6660                         &vrr_params);
6661
6662                 if (adev->family < AMDGPU_FAMILY_AI &&
6663                     amdgpu_dm_vrr_active(new_crtc_state)) {
6664                         mod_freesync_handle_v_update(dm->freesync_module,
6665                                                      new_stream, &vrr_params);
6666
6667                         /* Need to call this before the frame ends. */
6668                         dc_stream_adjust_vmin_vmax(dm->dc,
6669                                                    new_crtc_state->stream,
6670                                                    &vrr_params.adjust);
6671                 }
6672         }
6673
6674         mod_freesync_build_vrr_infopacket(
6675                 dm->freesync_module,
6676                 new_stream,
6677                 &vrr_params,
6678                 PACKET_TYPE_VRR,
6679                 TRANSFER_FUNC_UNKNOWN,
6680                 &vrr_infopacket);
6681
6682         new_crtc_state->freesync_timing_changed |=
6683                 (memcmp(&new_crtc_state->vrr_params.adjust,
6684                         &vrr_params.adjust,
6685                         sizeof(vrr_params.adjust)) != 0);
6686
6687         new_crtc_state->freesync_vrr_info_changed |=
6688                 (memcmp(&new_crtc_state->vrr_infopacket,
6689                         &vrr_infopacket,
6690                         sizeof(vrr_infopacket)) != 0);
6691
6692         new_crtc_state->vrr_params = vrr_params;
6693         new_crtc_state->vrr_infopacket = vrr_infopacket;
6694
6695         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6696         new_stream->vrr_infopacket = vrr_infopacket;
6697
6698         if (new_crtc_state->freesync_vrr_info_changed)
6699                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6700                               new_crtc_state->base.crtc->base.id,
6701                               (int)new_crtc_state->base.vrr_enabled,
6702                               (int)vrr_params.state);
6703
6704         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6705 }
6706
6707 static void pre_update_freesync_state_on_stream(
6708         struct amdgpu_display_manager *dm,
6709         struct dm_crtc_state *new_crtc_state)
6710 {
6711         struct dc_stream_state *new_stream = new_crtc_state->stream;
6712         struct mod_vrr_params vrr_params;
6713         struct mod_freesync_config config = new_crtc_state->freesync_config;
6714         struct amdgpu_device *adev = dm->adev;
6715         unsigned long flags;
6716
6717         if (!new_stream)
6718                 return;
6719
6720         /*
6721          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6722          * For now it's sufficient to just guard against these conditions.
6723          */
6724         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6725                 return;
6726
6727         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6728         vrr_params = new_crtc_state->vrr_params;
6729
6730         if (new_crtc_state->vrr_supported &&
6731             config.min_refresh_in_uhz &&
6732             config.max_refresh_in_uhz) {
6733                 config.state = new_crtc_state->base.vrr_enabled ?
6734                         VRR_STATE_ACTIVE_VARIABLE :
6735                         VRR_STATE_INACTIVE;
6736         } else {
6737                 config.state = VRR_STATE_UNSUPPORTED;
6738         }
6739
6740         mod_freesync_build_vrr_params(dm->freesync_module,
6741                                       new_stream,
6742                                       &config, &vrr_params);
6743
6744         new_crtc_state->freesync_timing_changed |=
6745                 (memcmp(&new_crtc_state->vrr_params.adjust,
6746                         &vrr_params.adjust,
6747                         sizeof(vrr_params.adjust)) != 0);
6748
6749         new_crtc_state->vrr_params = vrr_params;
6750         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6751 }
6752
6753 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6754                                             struct dm_crtc_state *new_state)
6755 {
6756         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6757         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6758
6759         if (!old_vrr_active && new_vrr_active) {
6760                 /* Transition VRR inactive -> active:
6761                  * While VRR is active, we must not disable the vblank irq, as a
6762                  * re-enable after a disable would compute bogus vblank/pflip
6763                  * timestamps if it happened inside the display front porch.
6764                  *
6765                  * We also need vupdate irq for the actual core vblank handling
6766                  * at end of vblank.
6767                  */
6768                 dm_set_vupdate_irq(new_state->base.crtc, true);
6769                 drm_crtc_vblank_get(new_state->base.crtc);
6770                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6771                                  __func__, new_state->base.crtc->base.id);
6772         } else if (old_vrr_active && !new_vrr_active) {
6773                 /* Transition VRR active -> inactive:
6774                  * Allow vblank irq disable again for fixed refresh rate.
6775                  */
6776                 dm_set_vupdate_irq(new_state->base.crtc, false);
6777                 drm_crtc_vblank_put(new_state->base.crtc);
6778                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6779                                  __func__, new_state->base.crtc->base.id);
6780         }
6781 }
6782
6783 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6784 {
6785         struct drm_plane *plane;
6786         struct drm_plane_state *old_plane_state, *new_plane_state;
6787         int i;
6788
6789         /*
6790          * TODO: Make this per-stream so we don't issue redundant updates for
6791          * commits with multiple streams.
6792          */
6793         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6794                                        new_plane_state, i)
6795                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6796                         handle_cursor_update(plane, old_plane_state);
6797 }
6798
6799 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6800                                     struct dc_state *dc_state,
6801                                     struct drm_device *dev,
6802                                     struct amdgpu_display_manager *dm,
6803                                     struct drm_crtc *pcrtc,
6804                                     bool wait_for_vblank)
6805 {
6806         uint32_t i;
6807         uint64_t timestamp_ns;
6808         struct drm_plane *plane;
6809         struct drm_plane_state *old_plane_state, *new_plane_state;
6810         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6811         struct drm_crtc_state *new_pcrtc_state =
6812                         drm_atomic_get_new_crtc_state(state, pcrtc);
6813         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6814         struct dm_crtc_state *dm_old_crtc_state =
6815                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6816         int planes_count = 0, vpos, hpos;
6817         long r;
6818         unsigned long flags;
6819         struct amdgpu_bo *abo;
6820         uint64_t tiling_flags;
6821         bool tmz_surface = false;
6822         uint32_t target_vblank, last_flip_vblank;
6823         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6824         bool pflip_present = false;
6825         struct {
6826                 struct dc_surface_update surface_updates[MAX_SURFACES];
6827                 struct dc_plane_info plane_infos[MAX_SURFACES];
6828                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6829                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6830                 struct dc_stream_update stream_update;
6831         } *bundle;
6832
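        /*
         * The update bundle is heap-allocated because its MAX_SURFACES-sized
         * arrays would be far too large for the kernel stack.
         */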
6833         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6834
6835         if (!bundle) {
6836                 dm_error("Failed to allocate update bundle\n");
6837                 goto cleanup;
6838         }
6839
6840         /*
6841          * Disable the cursor first if we're disabling all the planes.
6842          * It'll remain on the screen after the planes are re-enabled
6843          * if we don't.
6844          */
6845         if (acrtc_state->active_planes == 0)
6846                 amdgpu_dm_commit_cursors(state);
6847
6848         /* update planes when needed */
6849         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6850                 struct drm_crtc *crtc = new_plane_state->crtc;
6851                 struct drm_crtc_state *new_crtc_state;
6852                 struct drm_framebuffer *fb = new_plane_state->fb;
6853                 bool plane_needs_flip;
6854                 struct dc_plane_state *dc_plane;
6855                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6856
6857                 /* Cursor plane is handled after stream updates */
6858                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6859                         continue;
6860
6861                 if (!fb || !crtc || pcrtc != crtc)
6862                         continue;
6863
6864                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6865                 if (!new_crtc_state->active)
6866                         continue;
6867
6868                 dc_plane = dm_new_plane_state->dc_state;
6869
6870                 bundle->surface_updates[planes_count].surface = dc_plane;
6871                 if (new_pcrtc_state->color_mgmt_changed) {
6872                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6873                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6874                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6875                 }
6876
6877                 fill_dc_scaling_info(new_plane_state,
6878                                      &bundle->scaling_infos[planes_count]);
6879
6880                 bundle->surface_updates[planes_count].scaling_info =
6881                         &bundle->scaling_infos[planes_count];
6882
6883                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6884
6885                 pflip_present = pflip_present || plane_needs_flip;
6886
6887                 if (!plane_needs_flip) {
6888                         planes_count += 1;
6889                         continue;
6890                 }
6891
6892                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6893
6894                 /*
6895                  * Wait for all fences on this FB. Do a limited wait to avoid
6896                  * deadlock during GPU reset, when this fence may never signal
6897                  * while we hold the reservation lock for the BO.
6898                  */
6899                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6900                                                         false,
6901                                                         msecs_to_jiffies(5000));
6902                 if (unlikely(r <= 0))
6903                         DRM_ERROR("Waiting for fences timed out!");
6904
6905                 /*
6906                  * TODO: This might fail and hence is better not used;
6907                  * wait explicitly on fences instead. In general this
6908                  * should also be called for blocking commits, as per
6909                  * the framework helpers.
6910                  */
6911                 r = amdgpu_bo_reserve(abo, true);
6912                 if (unlikely(r != 0))
6913                         DRM_ERROR("failed to reserve buffer before flip\n");
6914
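                     /* Query tiling and TMZ (encryption) state while the BO is reserved. */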
6915                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6916
6917                 tmz_surface = amdgpu_bo_encrypted(abo);
6918
6919                 amdgpu_bo_unreserve(abo);
6920
6921                 fill_dc_plane_info_and_addr(
6922                         dm->adev, new_plane_state, tiling_flags,
6923                         &bundle->plane_infos[planes_count],
6924                         &bundle->flip_addrs[planes_count].address,
6925                         tmz_surface,
6926                         false);
6927
6928                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6929                                  new_plane_state->plane->index,
6930                                  bundle->plane_infos[planes_count].dcc.enable);
6931
6932                 bundle->surface_updates[planes_count].plane_info =
6933                         &bundle->plane_infos[planes_count];
6934
6935                 /*
6936                  * Only allow immediate flips for fast updates that don't
6937          * change FB pitch, DCC state, rotation or mirroring.
6938                  */
6939                 bundle->flip_addrs[planes_count].flip_immediate =
6940                         crtc->state->async_flip &&
6941                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6942
6943                 timestamp_ns = ktime_get_ns();
6944                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6945                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6946                 bundle->surface_updates[planes_count].surface = dc_plane;
6947
6948                 if (!bundle->surface_updates[planes_count].surface) {
6949                         DRM_ERROR("No surface for CRTC: id=%d\n",
6950                                         acrtc_attach->crtc_id);
6951                         continue;
6952                 }
6953
6954                 if (plane == pcrtc->primary)
6955                         update_freesync_state_on_stream(
6956                                 dm,
6957                                 acrtc_state,
6958                                 acrtc_state->stream,
6959                                 dc_plane,
6960                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6961
6962                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6963                                  __func__,
6964                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6965                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6966
6967                 planes_count += 1;
6968
6969         }
6970
6971         if (pflip_present) {
6972                 if (!vrr_active) {
6973                         /* Use old throttling in non-vrr fixed refresh rate mode
6974                          * to keep flip scheduling based on target vblank counts
6975                          * working in a backwards compatible way, e.g., for
6976                          * clients using the GLX_OML_sync_control extension or
6977                          * DRI3/Present extension with defined target_msc.
6978                          */
6979                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6980                 }
6981                 else {
6982                         /* For variable refresh rate mode only:
6983                          * Get vblank of last completed flip to avoid > 1 vrr
6984                          * flips per video frame by use of throttling, but allow
6985                          * flip programming anywhere in the possibly large
6986                          * variable vrr vblank interval for fine-grained flip
6987                          * timing control and more opportunity to avoid stutter
6988                          * on late submission of flips.
6989                          */
6990                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6991                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6992                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6993                 }
6994
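                     /*
                      * Throttle by targeting the vblank wait_for_vblank counts
                      * after the last completed flip.
                      */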
6995                 target_vblank = last_flip_vblank + wait_for_vblank;
6996
6997                 /*
6998                  * Wait until we're out of the vertical blank period before the one
6999                  * targeted by the flip
7000                  */
7001                 while ((acrtc_attach->enabled &&
7002                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7003                                                             0, &vpos, &hpos, NULL,
7004                                                             NULL, &pcrtc->hwmode)
7005                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7006                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7007                         (int)(target_vblank -
7008                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7009                         usleep_range(1000, 1100);
7010                 }
7011
7012                 if (acrtc_attach->base.state->event) {
7013                         drm_crtc_vblank_get(pcrtc);
7014
7015                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7016
7017                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7018                         prepare_flip_isr(acrtc_attach);
7019
7020                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7021                 }
7022
7023                 if (acrtc_state->stream) {
7024                         if (acrtc_state->freesync_vrr_info_changed)
7025                                 bundle->stream_update.vrr_infopacket =
7026                                         &acrtc_state->stream->vrr_infopacket;
7027                 }
7028         }
7029
7030         /* Update the planes if changed or disable if we don't have any. */
7031         if ((planes_count || acrtc_state->active_planes == 0) &&
7032                 acrtc_state->stream) {
7033                 bundle->stream_update.stream = acrtc_state->stream;
7034                 if (new_pcrtc_state->mode_changed) {
7035                         bundle->stream_update.src = acrtc_state->stream->src;
7036                         bundle->stream_update.dst = acrtc_state->stream->dst;
7037                 }
7038
7039                 if (new_pcrtc_state->color_mgmt_changed) {
7040                         /*
7041                          * TODO: This isn't fully correct since we've actually
7042                          * already modified the stream in place.
7043                          */
7044                         bundle->stream_update.gamut_remap =
7045                                 &acrtc_state->stream->gamut_remap_matrix;
7046                         bundle->stream_update.output_csc_transform =
7047                                 &acrtc_state->stream->csc_color_matrix;
7048                         bundle->stream_update.out_transfer_func =
7049                                 acrtc_state->stream->out_transfer_func;
7050                 }
7051
7052                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7053                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7054                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7055
7056                 /*
7057                  * If FreeSync state on the stream has changed then we need to
7058                  * re-adjust the min/max bounds now that DC doesn't handle this
7059                  * as part of commit.
7060                  */
7061                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7062                     amdgpu_dm_vrr_active(acrtc_state)) {
7063                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7064                         dc_stream_adjust_vmin_vmax(
7065                                 dm->dc, acrtc_state->stream,
7066                                 &acrtc_state->vrr_params.adjust);
7067                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7068                 }
7069                 mutex_lock(&dm->dc_lock);
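                     /*
                      * PSR must not be active while programming anything heavier
                      * than a fast update, so drop out of PSR first.
                      */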
7070                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7071                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7072                         amdgpu_dm_psr_disable(acrtc_state->stream);
7073
7074                 dc_commit_updates_for_stream(dm->dc,
7075                                                      bundle->surface_updates,
7076                                                      planes_count,
7077                                                      acrtc_state->stream,
7078                                                      &bundle->stream_update,
7079                                                      dc_state);
7080
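                     /*
                      * After a full update, set up PSR on links that support it
                      * but don't have the feature enabled yet; on fast updates,
                      * re-enter PSR if it is enabled but currently inactive.
                      */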
7081                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7082                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7083                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7084                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7085                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7086                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7087                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7088                         amdgpu_dm_psr_enable(acrtc_state->stream);
7089                 }
7090
7091                 mutex_unlock(&dm->dc_lock);
7092         }
7093
7094         /*
7095          * Update cursor state *after* programming all the planes.
7096          * This avoids redundant programming in the case where we're going
7097          * to be disabling a single plane - those pipes are being disabled.
7098          */
7099         if (acrtc_state->active_planes)
7100                 amdgpu_dm_commit_cursors(state);
7101
7102 cleanup:
7103         kfree(bundle);
7104 }
7105
7106 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7107                                    struct drm_atomic_state *state)
7108 {
7109         struct amdgpu_device *adev = dev->dev_private;
7110         struct amdgpu_dm_connector *aconnector;
7111         struct drm_connector *connector;
7112         struct drm_connector_state *old_con_state, *new_con_state;
7113         struct drm_crtc_state *new_crtc_state;
7114         struct dm_crtc_state *new_dm_crtc_state;
7115         const struct dc_stream_status *status;
7116         int i, inst;
7117
7118         /* Notify audio device removals. */
7119         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7120                 if (old_con_state->crtc != new_con_state->crtc) {
7121                         /* CRTC changes require notification. */
7122                         goto notify;
7123                 }
7124
7125                 if (!new_con_state->crtc)
7126                         continue;
7127
7128                 new_crtc_state = drm_atomic_get_new_crtc_state(
7129                         state, new_con_state->crtc);
7130
7131                 if (!new_crtc_state)
7132                         continue;
7133
7134                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7135                         continue;
7136
7137         notify:
7138                 aconnector = to_amdgpu_dm_connector(connector);
7139
7140                 mutex_lock(&adev->dm.audio_lock);
7141                 inst = aconnector->audio_inst;
7142                 aconnector->audio_inst = -1;
7143                 mutex_unlock(&adev->dm.audio_lock);
7144
7145                 amdgpu_dm_audio_eld_notify(adev, inst);
7146         }
7147
7148         /* Notify audio device additions. */
7149         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7150                 if (!new_con_state->crtc)
7151                         continue;
7152
7153                 new_crtc_state = drm_atomic_get_new_crtc_state(
7154                         state, new_con_state->crtc);
7155
7156                 if (!new_crtc_state)
7157                         continue;
7158
7159                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7160                         continue;
7161
7162                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7163                 if (!new_dm_crtc_state->stream)
7164                         continue;
7165
7166                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7167                 if (!status)
7168                         continue;
7169
7170                 aconnector = to_amdgpu_dm_connector(connector);
7171
7172                 mutex_lock(&adev->dm.audio_lock);
7173                 inst = status->audio_inst;
7174                 aconnector->audio_inst = inst;
7175                 mutex_unlock(&adev->dm.audio_lock);
7176
7177                 amdgpu_dm_audio_eld_notify(adev, inst);
7178         }
7179 }
7180
7181 /*
7182  * Enable interrupts on CRTCs that are newly active, have undergone
7183  * a modeset, or have active planes again.
7184  *
7185  * Done in two passes, based on the for_modeset flag:
7186  * Pass 1: For CRTCs going through modeset
7187  * Pass 2: For CRTCs going from 0 to n active planes
7188  *
7189  * Interrupts can only be enabled after the planes are programmed,
7190  * so this requires a two-pass approach since we don't want to
7191  * just defer the interrupts until after commit planes every time.
7192  */
7193 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7194                                              struct drm_atomic_state *state,
7195                                              bool for_modeset)
7196 {
7197         struct amdgpu_device *adev = dev->dev_private;
7198         struct drm_crtc *crtc;
7199         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7200         int i;
7201 #ifdef CONFIG_DEBUG_FS
7202         enum amdgpu_dm_pipe_crc_source source;
7203 #endif
7204
7205         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7206                                       new_crtc_state, i) {
7207                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7208                 struct dm_crtc_state *dm_new_crtc_state =
7209                         to_dm_crtc_state(new_crtc_state);
7210                 struct dm_crtc_state *dm_old_crtc_state =
7211                         to_dm_crtc_state(old_crtc_state);
7212                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7213                 bool run_pass;
7214
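                     /*
                      * Pass 1 (for_modeset) handles CRTCs undergoing a modeset;
                      * pass 2 handles CRTCs whose interrupts were previously off
                      * and are going from 0 to n active planes.
                      */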
7215                 run_pass = (for_modeset && modeset) ||
7216                            (!for_modeset && !modeset &&
7217                             !dm_old_crtc_state->interrupts_enabled);
7218
7219                 if (!run_pass)
7220                         continue;
7221
7222                 if (!dm_new_crtc_state->interrupts_enabled)
7223                         continue;
7224
7225                 manage_dm_interrupts(adev, acrtc, true);
7226
7227 #ifdef CONFIG_DEBUG_FS
7228                 /* The stream has changed so CRC capture needs to be re-enabled. */
7229                 source = dm_new_crtc_state->crc_src;
7230                 if (amdgpu_dm_is_valid_crc_source(source)) {
7231                         amdgpu_dm_crtc_configure_crc_source(
7232                                 crtc, dm_new_crtc_state,
7233                                 dm_new_crtc_state->crc_src);
7234                 }
7235 #endif
7236         }
7237 }
7238
7239 /*
7240  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7241  * @crtc_state: the DRM CRTC state
7242  * @stream_state: the DC stream state.
7243  *
7244  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7245  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7246  */
7247 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7248                                                 struct dc_stream_state *stream_state)
7249 {
7250         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7251 }
7252
7253 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7254                                    struct drm_atomic_state *state,
7255                                    bool nonblock)
7256 {
7257         struct drm_crtc *crtc;
7258         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7259         struct amdgpu_device *adev = dev->dev_private;
7260         int i;
7261
7262         /*
7263          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7264          * a modeset, being disabled, or have no active planes.
7265          *
7266          * It's done in atomic commit rather than commit tail for now since
7267          * some of these interrupt handlers access the current CRTC state and
7268          * potentially the stream pointer itself.
7269          *
7270          * Since the atomic state is swapped within atomic commit and not within
7271          * commit tail, this would lead to the new state (that hasn't been
7272          * committed yet) being accessed from within the handlers.
7273          *
7274          * TODO: Fix this so we can do this in commit tail and not have to block
7275          * in atomic check.
7276          */
7277         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7278                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7279                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7280                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7281
7282                 if (dm_old_crtc_state->interrupts_enabled &&
7283                     (!dm_new_crtc_state->interrupts_enabled ||
7284                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7285                         manage_dm_interrupts(adev, acrtc, false);
7286         }
7287         /*
7288          * Add a check here for SoCs that support a hardware cursor plane, to
7289          * unset legacy_cursor_update.
7290          */
7291
7292         return drm_atomic_helper_commit(dev, state, nonblock);
7293
7294         /* TODO: Handle EINTR, re-enable IRQ */
7295 }
7296
7297 /**
7298  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7299  * @state: The atomic state to commit
7300  *
7301  * This will tell DC to commit the constructed DC state from atomic_check,
7302  * programming the hardware. Any failure here implies a hardware failure, since
7303  * atomic check should have filtered anything non-kosher.
7304  */
7305 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7306 {
7307         struct drm_device *dev = state->dev;
7308         struct amdgpu_device *adev = dev->dev_private;
7309         struct amdgpu_display_manager *dm = &adev->dm;
7310         struct dm_atomic_state *dm_state;
7311         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7312         uint32_t i, j;
7313         struct drm_crtc *crtc;
7314         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7315         unsigned long flags;
7316         bool wait_for_vblank = true;
7317         struct drm_connector *connector;
7318         struct drm_connector_state *old_con_state, *new_con_state;
7319         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7320         int crtc_disable_count = 0;
7321
7322         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7323
7324         dm_state = dm_atomic_get_new_state(state);
7325         if (dm_state && dm_state->context) {
7326                 dc_state = dm_state->context;
7327         } else {
7328                 /* No state changes, retain current state. */
7329                 dc_state_temp = dc_create_state(dm->dc);
7330                 ASSERT(dc_state_temp);
7331                 dc_state = dc_state_temp;
7332                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7333         }
7334
7335         /* update changed items */
7336         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7337                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7338
7339                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7340                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7341
7342                 DRM_DEBUG_DRIVER(
7343                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7344                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7345                         "connectors_changed:%d\n",
7346                         acrtc->crtc_id,
7347                         new_crtc_state->enable,
7348                         new_crtc_state->active,
7349                         new_crtc_state->planes_changed,
7350                         new_crtc_state->mode_changed,
7351                         new_crtc_state->active_changed,
7352                         new_crtc_state->connectors_changed);
7353
7354                 /* Copy all transient state flags into dc state */
7355                 if (dm_new_crtc_state->stream) {
7356                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7357                                                             dm_new_crtc_state->stream);
7358                 }
7359
7360                 /* handles headless hotplug case, updating new_state and
7361                  * aconnector as needed
7362                  */
7363
7364                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7365
7366                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7367
7368                         if (!dm_new_crtc_state->stream) {
7369                                 /*
7370                                  * This could happen because of issues with
7371                                  * userspace notification delivery.
7372                                  * In this case userspace tries to set a mode
7373                                  * on a display which is in fact disconnected.
7374                                  * dc_sink is NULL on the aconnector in this case.
7375                                  * We expect a mode reset to come soon.
7376                                  *
7377                                  * This can also happen when an unplug occurs
7378                                  * during the resume sequence.
7379                                  *
7380                                  * In this case, we want to pretend we still
7381                                  * have a sink to keep the pipe running so that
7382                                  * hw state is consistent with the sw state.
7383                                  */
7384                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7385                                                 __func__, acrtc->base.base.id);
7386                                 continue;
7387                         }
7388
7389                         if (dm_old_crtc_state->stream)
7390                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7391
7392                         pm_runtime_get_noresume(dev->dev);
7393
7394                         acrtc->enabled = true;
7395                         acrtc->hw_mode = new_crtc_state->mode;
7396                         crtc->hwmode = new_crtc_state->mode;
7397                 } else if (modereset_required(new_crtc_state)) {
7398                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7399                         /* i.e. reset mode */
7400                         if (dm_old_crtc_state->stream) {
7401                                 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7402                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7403
7404                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7405                         }
7406                 }
7407         } /* for_each_crtc_in_state() */
7408
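             /* Commit the constructed DC state to the hardware under the DC lock. */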
7409         if (dc_state) {
7410                 dm_enable_per_frame_crtc_master_sync(dc_state);
7411                 mutex_lock(&dm->dc_lock);
7412                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7413                 mutex_unlock(&dm->dc_lock);
7414         }
7415
7416         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7417                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7418
7419                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7420
7421                 if (dm_new_crtc_state->stream != NULL) {
7422                         const struct dc_stream_status *status =
7423                                         dc_stream_get_status(dm_new_crtc_state->stream);
7424
7425                         if (!status)
7426                                 status = dc_stream_get_status_from_state(dc_state,
7427                                                                          dm_new_crtc_state->stream);
7428
7429                         if (!status)
7430                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7431                         else
7432                                 acrtc->otg_inst = status->primary_otg_inst;
7433                 }
7434         }
7435 #ifdef CONFIG_DRM_AMD_DC_HDCP
7436         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7437                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7438                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7439                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7440
7441                 new_crtc_state = NULL;
7442
7443                 if (acrtc)
7444                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7445
7446                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7447
7448                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7449                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7450                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7451                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7452                         continue;
7453                 }
7454
7455                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7456                         hdcp_update_display(
7457                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7458                                 new_con_state->hdcp_content_type,
7459                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7460                                                                                                          : false);
7461         }
7462 #endif
7463
7464         /* Handle connector state changes */
7465         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7466                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7467                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7468                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7469                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7470                 struct dc_stream_update stream_update;
7471                 struct dc_info_packet hdr_packet;
7472                 struct dc_stream_status *status = NULL;
7473                 bool abm_changed, hdr_changed, scaling_changed;
7474
7475                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7476                 memset(&stream_update, 0, sizeof(stream_update));
7477
7478                 if (acrtc) {
7479                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7480                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7481                 }
7482
7483                 /* Skip any modesets/resets */
7484                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7485                         continue;
7486
7487                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7488                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7489
7490                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7491                                                              dm_old_con_state);
7492
7493                 abm_changed = dm_new_crtc_state->abm_level !=
7494                               dm_old_crtc_state->abm_level;
7495
7496                 hdr_changed =
7497                         is_hdr_metadata_different(old_con_state, new_con_state);
7498
7499                 if (!scaling_changed && !abm_changed && !hdr_changed)
7500                         continue;
7501
7502                 stream_update.stream = dm_new_crtc_state->stream;
7503                 if (scaling_changed) {
7504                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7505                                         dm_new_con_state, dm_new_crtc_state->stream);
7506
7507                         stream_update.src = dm_new_crtc_state->stream->src;
7508                         stream_update.dst = dm_new_crtc_state->stream->dst;
7509                 }
7510
7511                 if (abm_changed) {
7512                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7513
7514                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7515                 }
7516
7517                 if (hdr_changed) {
7518                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7519                         stream_update.hdr_static_metadata = &hdr_packet;
7520                 }
7521
7522                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7523                 if (WARN_ON(!status || !status->plane_count))
7524                         continue;
7525
7526                 /*
7527                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7528                  * Here we create an empty update on each plane.
7529                  * To fix this, DC should permit updating only stream properties.
7530                  */
7531                 for (j = 0; j < status->plane_count; j++)
7532                         dummy_updates[j].surface = status->plane_states[0];
7533
7534
7535                 mutex_lock(&dm->dc_lock);
7536                 dc_commit_updates_for_stream(dm->dc,
7537                                                      dummy_updates,
7538                                                      status->plane_count,
7539                                                      dm_new_crtc_state->stream,
7540                                                      &stream_update,
7541                                                      dc_state);
7542                 mutex_unlock(&dm->dc_lock);
7543         }
7544
7545         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7546         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7547                                       new_crtc_state, i) {
7548                 if (old_crtc_state->active && !new_crtc_state->active)
7549                         crtc_disable_count++;
7550
7551                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7552                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7553
7554                 /* Update freesync active state. */
7555                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7556
7557                 /* Handle vrr on->off / off->on transitions */
7558                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7559                                                 dm_new_crtc_state);
7560         }
7561
7562         /* Enable interrupts for CRTCs going through a modeset. */
7563         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7564
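             /*
              * Async flips must not be delayed, so skip vblank throttling and
              * the flip-done wait if any CRTC requested one.
              */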
7565         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7566                 if (new_crtc_state->async_flip)
7567                         wait_for_vblank = false;
7568
7569         /* update planes when needed per crtc*/
7570         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7571                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7572
7573                 if (dm_new_crtc_state->stream)
7574                         amdgpu_dm_commit_planes(state, dc_state, dev,
7575                                                 dm, crtc, wait_for_vblank);
7576         }
7577
7578         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7579         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7580
7581         /* Update audio instances for each connector. */
7582         amdgpu_dm_commit_audio(dev, state);
7583
7584         /*
7585          * Send the vblank event for all events not handled in flip and
7586          * mark the event as consumed for drm_atomic_helper_commit_hw_done.
7587          */
7588         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7589         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7590
7591                 if (new_crtc_state->event)
7592                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7593
7594                 new_crtc_state->event = NULL;
7595         }
7596         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7597
7598         /* Signal HW programming completion */
7599         drm_atomic_helper_commit_hw_done(state);
7600
7601         if (wait_for_vblank)
7602                 drm_atomic_helper_wait_for_flip_done(dev, state);
7603
7604         drm_atomic_helper_cleanup_planes(dev, state);
7605
7606         /*
7607          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7608          * so we can put the GPU into runtime suspend if we're not driving any
7609          * displays anymore
7610          */
7611         for (i = 0; i < crtc_disable_count; i++)
7612                 pm_runtime_put_autosuspend(dev->dev);
7613         pm_runtime_mark_last_busy(dev->dev);
7614
7615         if (dc_state_temp)
7616                 dc_release_state(dc_state_temp);
7617 }
7618
7619
7620 static int dm_force_atomic_commit(struct drm_connector *connector)
7621 {
7622         int ret = 0;
7623         struct drm_device *ddev = connector->dev;
7624         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7625         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7626         struct drm_plane *plane = disconnected_acrtc->base.primary;
7627         struct drm_connector_state *conn_state;
7628         struct drm_crtc_state *crtc_state;
7629         struct drm_plane_state *plane_state;
7630
7631         if (!state)
7632                 return -ENOMEM;
7633
7634         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7635
7636         /* Construct an atomic state to restore previous display setting */
7637
7638         /*
7639          * Attach connectors to drm_atomic_state
7640          */
7641         conn_state = drm_atomic_get_connector_state(state, connector);
7642
7643         ret = PTR_ERR_OR_ZERO(conn_state);
7644         if (ret)
7645                 goto err;
7646
7647         /* Attach crtc to drm_atomic_state*/
7648         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7649
7650         ret = PTR_ERR_OR_ZERO(crtc_state);
7651         if (ret)
7652                 goto err;
7653
7654         /* force a restore */
7655         crtc_state->mode_changed = true;
7656
7657         /* Attach plane to drm_atomic_state */
7658         plane_state = drm_atomic_get_plane_state(state, plane);
7659
7660         ret = PTR_ERR_OR_ZERO(plane_state);
7661         if (ret)
7662                 goto err;
7663
7664
7665         /* Call commit internally with the state we just constructed */
7666         ret = drm_atomic_commit(state);
7667         if (!ret)
7668                 return 0;
7669
7670 err:
7671         DRM_ERROR("Restoring old state failed with %i\n", ret);
7672         drm_atomic_state_put(state);
7673
7674         return ret;
7675 }
7676
7677 /*
7678  * This function handles all cases when a set mode does not come upon hotplug.
7679  * This includes when a display is unplugged and then plugged back into the
7680  * same port, and when running without usermode desktop manager support.
7681  */
7682 void dm_restore_drm_connector_state(struct drm_device *dev,
7683                                     struct drm_connector *connector)
7684 {
7685         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7686         struct amdgpu_crtc *disconnected_acrtc;
7687         struct dm_crtc_state *acrtc_state;
7688
7689         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7690                 return;
7691
7692         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7693         if (!disconnected_acrtc)
7694                 return;
7695
7696         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7697         if (!acrtc_state->stream)
7698                 return;
7699
7700         /*
7701          * If the previous sink is not released and is different from the
7702          * current one, we deduce we cannot rely on a usermode call to turn
7703          * the display on, so we do it here.
7704          */
7705         if (acrtc_state->stream->sink != aconnector->dc_sink)
7706                 dm_force_atomic_commit(&aconnector->base);
7707 }
7708
7709 /*
7710  * Grabs all modesetting locks to serialize against any blocking commits,
7711  * and waits for completion of all nonblocking commits.
7712  */
7713 static int do_aquire_global_lock(struct drm_device *dev,
7714                                  struct drm_atomic_state *state)
7715 {
7716         struct drm_crtc *crtc;
7717         struct drm_crtc_commit *commit;
7718         long ret;
7719
7720         /*
7721          * Adding all modeset locks to the acquire_ctx will
7722          * ensure that when the framework releases it, the
7723          * extra locks we are taking here will get released too.
7724          */
7725         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7726         if (ret)
7727                 return ret;
7728
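             /*
              * For each CRTC, take a reference on its most recent pending
              * commit (if any) so it cannot be freed while we wait for its
              * hw_done and flip_done completions.
              */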
7729         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7730                 spin_lock(&crtc->commit_lock);
7731                 commit = list_first_entry_or_null(&crtc->commit_list,
7732                                 struct drm_crtc_commit, commit_entry);
7733                 if (commit)
7734                         drm_crtc_commit_get(commit);
7735                 spin_unlock(&crtc->commit_lock);
7736
7737                 if (!commit)
7738                         continue;
7739
7740                 /*
7741                  * Make sure all pending HW programming has completed and
7742                  * all page flips are done.
7743                  */
7744                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7745
7746                 if (ret > 0)
7747                         ret = wait_for_completion_interruptible_timeout(
7748                                         &commit->flip_done, 10*HZ);
7749
7750                 if (ret == 0)
7751                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7752                                   "timed out\n", crtc->base.id, crtc->name);
7753
7754                 drm_crtc_commit_put(commit);
7755         }
7756
7757         return ret < 0 ? ret : 0;
7758 }
7759
7760 static void get_freesync_config_for_crtc(
7761         struct dm_crtc_state *new_crtc_state,
7762         struct dm_connector_state *new_con_state)
7763 {
7764         struct mod_freesync_config config = {0};
7765         struct amdgpu_dm_connector *aconnector =
7766                         to_amdgpu_dm_connector(new_con_state->base.connector);
7767         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7768         int vrefresh = drm_mode_vrefresh(mode);
7769
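             /*
              * VRR is usable only when the connector reports FreeSync capability
              * and the mode's nominal refresh rate lies within the panel's range.
              */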
7770         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7771                                         vrefresh >= aconnector->min_vfreq &&
7772                                         vrefresh <= aconnector->max_vfreq;
7773
7774         if (new_crtc_state->vrr_supported) {
7775                 new_crtc_state->stream->ignore_msa_timing_param = true;
7776                 config.state = new_crtc_state->base.vrr_enabled ?
7777                                 VRR_STATE_ACTIVE_VARIABLE :
7778                                 VRR_STATE_INACTIVE;
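                     /* mod_freesync expects the refresh range in micro-Hz. */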
7779                 config.min_refresh_in_uhz =
7780                                 aconnector->min_vfreq * 1000000;
7781                 config.max_refresh_in_uhz =
7782                                 aconnector->max_vfreq * 1000000;
7783                 config.vsif_supported = true;
7784                 config.btr = true;
7785         }
7786
7787         new_crtc_state->freesync_config = config;
7788 }
7789
7790 static void reset_freesync_config_for_crtc(
7791         struct dm_crtc_state *new_crtc_state)
7792 {
7793         new_crtc_state->vrr_supported = false;
7794
7795         memset(&new_crtc_state->vrr_params, 0,
7796                sizeof(new_crtc_state->vrr_params));
7797         memset(&new_crtc_state->vrr_infopacket, 0,
7798                sizeof(new_crtc_state->vrr_infopacket));
7799 }
7800
7801 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7802                                 struct drm_atomic_state *state,
7803                                 struct drm_crtc *crtc,
7804                                 struct drm_crtc_state *old_crtc_state,
7805                                 struct drm_crtc_state *new_crtc_state,
7806                                 bool enable,
7807                                 bool *lock_and_validation_needed)
7808 {
7809         struct dm_atomic_state *dm_state = NULL;
7810         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7811         struct dc_stream_state *new_stream;
7812         int ret = 0;
7813
7814         /*
7815          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7816          * update changed items
7817          */
7818         struct amdgpu_crtc *acrtc = NULL;
7819         struct amdgpu_dm_connector *aconnector = NULL;
7820         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7821         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7822
7823         new_stream = NULL;
7824
7825         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7826         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7827         acrtc = to_amdgpu_crtc(crtc);
7828         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7829
7830         /* TODO This hack should go away */
7831         if (aconnector && enable) {
7832                 /* Make sure fake sink is created in plug-in scenario */
7833                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7834                                                             &aconnector->base);
7835                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7836                                                             &aconnector->base);
7837
7838                 if (IS_ERR(drm_new_conn_state)) {
7839                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7840                         goto fail;
7841                 }
7842
7843                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7844                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7845
7846                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7847                         goto skip_modeset;
7848
7849                 new_stream = create_validate_stream_for_sink(aconnector,
7850                                                              &new_crtc_state->mode,
7851                                                              dm_new_conn_state,
7852                                                              dm_old_crtc_state->stream);
7853
7854                 /*
7855                  * We can have no stream on ACTION_SET if a display
7856                  * was disconnected during S3. In this case it is not an
7857                  * error: the OS will be updated after detection and
7858                  * will do the right thing on the next atomic commit.
7859                  */
7860
7861                 if (!new_stream) {
7862                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7863                                         __func__, acrtc->base.base.id);
7864                         ret = -ENOMEM;
7865                         goto fail;
7866                 }
7867
7868                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7869
7870                 ret = fill_hdr_info_packet(drm_new_conn_state,
7871                                            &new_stream->hdr_static_metadata);
7872                 if (ret)
7873                         goto fail;
7874
7875                 /*
7876                  * If we already removed the old stream from the context
7877                  * (and set the new stream to NULL) then we can't reuse
7878                  * the old stream even if the stream and scaling are unchanged.
7879                  * We'll hit the BUG_ON and black screen.
7880                  *
7881                  * TODO: Refactor this function to allow this check to work
7882                  * in all conditions.
7883                  */
7884                 if (dm_new_crtc_state->stream &&
7885                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7886                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7887                         new_crtc_state->mode_changed = false;
7888                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7889                                          new_crtc_state->mode_changed);
7890                 }
7891         }
7892
7893         /* mode_changed flag may get updated above, need to check again */
7894         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7895                 goto skip_modeset;
7896
7897         DRM_DEBUG_DRIVER(
7898                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7899                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7900                 "connectors_changed:%d\n",
7901                 acrtc->crtc_id,
7902                 new_crtc_state->enable,
7903                 new_crtc_state->active,
7904                 new_crtc_state->planes_changed,
7905                 new_crtc_state->mode_changed,
7906                 new_crtc_state->active_changed,
7907                 new_crtc_state->connectors_changed);
7908
7909         /* Remove stream for any changed/disabled CRTC */
7910         if (!enable) {
7911
7912                 if (!dm_old_crtc_state->stream)
7913                         goto skip_modeset;
7914
7915                 ret = dm_atomic_get_state(state, &dm_state);
7916                 if (ret)
7917                         goto fail;
7918
7919                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7920                                 crtc->base.id);
7921
7922                 /* i.e. reset mode */
7923                 if (dc_remove_stream_from_ctx(
7924                                 dm->dc,
7925                                 dm_state->context,
7926                                 dm_old_crtc_state->stream) != DC_OK) {
7927                         ret = -EINVAL;
7928                         goto fail;
7929                 }
7930
7931                 dc_stream_release(dm_old_crtc_state->stream);
7932                 dm_new_crtc_state->stream = NULL;
7933
7934                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7935
7936                 *lock_and_validation_needed = true;
7937
7938         } else {/* Add stream for any updated/enabled CRTC */
7939                 /*
7940                  * Quick fix to prevent a NULL pointer dereference on new_stream
7941                  * when newly added MST connectors are not found in the existing
7942                  * crtc_state in chained mode. TODO: dig out the root cause.
7943                  */
7944                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7945                         goto skip_modeset;
7946
7947                 if (modereset_required(new_crtc_state))
7948                         goto skip_modeset;
7949
7950                 if (modeset_required(new_crtc_state, new_stream,
7951                                      dm_old_crtc_state->stream)) {
7952
7953                         WARN_ON(dm_new_crtc_state->stream);
7954
7955                         ret = dm_atomic_get_state(state, &dm_state);
7956                         if (ret)
7957                                 goto fail;
7958
7959                         dm_new_crtc_state->stream = new_stream;
7960
7961                         dc_stream_retain(new_stream);
7962
7963                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7964                                                 crtc->base.id);
7965
7966                         if (dc_add_stream_to_ctx(
7967                                         dm->dc,
7968                                         dm_state->context,
7969                                         dm_new_crtc_state->stream) != DC_OK) {
7970                                 ret = -EINVAL;
7971                                 goto fail;
7972                         }
7973
7974                         *lock_and_validation_needed = true;
7975                 }
7976         }
7977
7978 skip_modeset:
7979         /* Release extra reference */
7980         if (new_stream)
7981                  dc_stream_release(new_stream);
7982
7983         /*
7984          * We want to do dc stream updates that do not require a
7985          * full modeset below.
7986          */
7987         if (!(enable && aconnector && new_crtc_state->enable &&
7988               new_crtc_state->active))
7989                 return 0;
7990         /*
7991          * Given above conditions, the dc state cannot be NULL because:
7992          * 1. We're in the process of enabling CRTCs (the stream has just
7993          *    been added to the dc context, or is already on the context),
7994          * 2. The CRTC has a valid connector attached, and
7995          * 3. It is currently active and enabled.
7996          * => The dc stream state currently exists.
7997          */
7998         BUG_ON(dm_new_crtc_state->stream == NULL);
7999
8000         /* Scaling or underscan settings */
8001         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8002                 update_stream_scaling_settings(
8003                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8004
8005         /* ABM settings */
8006         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8007
8008         /*
8009          * Color management settings. We also update color properties
8010          * when a modeset is needed, to ensure it gets reprogrammed.
8011          */
8012         if (dm_new_crtc_state->base.color_mgmt_changed ||
8013             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8014                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8015                 if (ret)
8016                         goto fail;
8017         }
8018
8019         /* Update Freesync settings. */
8020         get_freesync_config_for_crtc(dm_new_crtc_state,
8021                                      dm_new_conn_state);
8022
8023         return ret;
8024
8025 fail:
8026         if (new_stream)
8027                 dc_stream_release(new_stream);
8028         return ret;
8029 }
8030
8031 static bool should_reset_plane(struct drm_atomic_state *state,
8032                                struct drm_plane *plane,
8033                                struct drm_plane_state *old_plane_state,
8034                                struct drm_plane_state *new_plane_state)
8035 {
8036         struct drm_plane *other;
8037         struct drm_plane_state *old_other_state, *new_other_state;
8038         struct drm_crtc_state *new_crtc_state;
8039         int i;
8040
8041         /*
8042          * TODO: Remove this hack once the checks below are sufficient
8043          * to determine when we need to reset all the planes on
8044          * the stream.
8045          */
8046         if (state->allow_modeset)
8047                 return true;
8048
8049         /* Exit early if we know that we're adding or removing the plane. */
8050         if (old_plane_state->crtc != new_plane_state->crtc)
8051                 return true;
8052
8053         /* old crtc == new_crtc == NULL, plane not in context. */
8054         if (!new_plane_state->crtc)
8055                 return false;
8056
8057         new_crtc_state =
8058                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8059
8060         if (!new_crtc_state)
8061                 return true;
8062
8063         /* CRTC Degamma changes currently require us to recreate planes. */
8064         if (new_crtc_state->color_mgmt_changed)
8065                 return true;
8066
8067         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8068                 return true;
8069
8070         /*
8071          * If there are any new primary or overlay planes being added or
8072          * removed then the z-order can potentially change. To ensure
8073          * correct z-order and pipe acquisition the current DC architecture
8074          * requires us to remove and recreate all existing planes.
8075          *
8076          * TODO: Come up with a more elegant solution for this.
8077          */
8078         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8079                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8080                         continue;
8081
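                     /*
                      * Only planes that share a CRTC with this one, either
                      * before or after the update, can affect its z-order.
                      */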
8082                 if (old_other_state->crtc != new_plane_state->crtc &&
8083                     new_other_state->crtc != new_plane_state->crtc)
8084                         continue;
8085
8086                 if (old_other_state->crtc != new_other_state->crtc)
8087                         return true;
8088
8089                 /* TODO: Remove this once we can handle fast format changes. */
8090                 if (old_other_state->fb && new_other_state->fb &&
8091                     old_other_state->fb->format != new_other_state->fb->format)
8092                         return true;
8093         }
8094
8095         return false;
8096 }
8097
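     /*
      * Note: amdgpu_dm_atomic_check() calls dm_update_plane_state() twice per
      * plane: first with enable == false to strip modified planes from the DC
      * context (iterating planes in reverse), then with enable == true to
      * re-add them against the updated CRTC state.
      */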
8098 static int dm_update_plane_state(struct dc *dc,
8099                                  struct drm_atomic_state *state,
8100                                  struct drm_plane *plane,
8101                                  struct drm_plane_state *old_plane_state,
8102                                  struct drm_plane_state *new_plane_state,
8103                                  bool enable,
8104                                  bool *lock_and_validation_needed)
8105 {
8107         struct dm_atomic_state *dm_state = NULL;
8108         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8109         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8110         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8111         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8112         struct amdgpu_crtc *new_acrtc;
8113         bool needs_reset;
8114         int ret = 0;
8115
8117         new_plane_crtc = new_plane_state->crtc;
8118         old_plane_crtc = old_plane_state->crtc;
8119         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8120         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8121
8122         /* TODO: Implement a better atomic check for the cursor plane. */
8123         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8124                 if (!enable || !new_plane_crtc ||
8125                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8126                         return 0;
8127
8128                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8129
8130                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8131                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8132                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8133                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8134                         return -EINVAL;
8135                 }
8136
8137                 return 0;
8138         }
8139
8140         needs_reset = should_reset_plane(state, plane, old_plane_state,
8141                                          new_plane_state);
8142
8143         /* Remove any changed/removed planes */
8144         if (!enable) {
8145                 if (!needs_reset)
8146                         return 0;
8147
8148                 if (!old_plane_crtc)
8149                         return 0;
8150
8151                 old_crtc_state = drm_atomic_get_old_crtc_state(
8152                                 state, old_plane_crtc);
8153                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8154
8155                 if (!dm_old_crtc_state->stream)
8156                         return 0;
8157
8158                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8159                                 plane->base.id, old_plane_crtc->base.id);
8160
8161                 ret = dm_atomic_get_state(state, &dm_state);
8162                 if (ret)
8163                         return ret;
8164
8165                 if (!dc_remove_plane_from_context(
8166                                 dc,
8167                                 dm_old_crtc_state->stream,
8168                                 dm_old_plane_state->dc_state,
8169                                 dm_state->context)) {
8171                         ret = -EINVAL;
8172                         return ret;
8173                 }
8174
8176                 dc_plane_state_release(dm_old_plane_state->dc_state);
8177                 dm_new_plane_state->dc_state = NULL;
8178
8179                 *lock_and_validation_needed = true;
8180
8181         } else { /* Add new planes */
8182                 struct dc_plane_state *dc_new_plane_state;
8183
8184                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8185                         return 0;
8186
8187                 if (!new_plane_crtc)
8188                         return 0;
8189
8190                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8191                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8192
8193                 if (!dm_new_crtc_state->stream)
8194                         return 0;
8195
8196                 if (!needs_reset)
8197                         return 0;
8198
8199                 WARN_ON(dm_new_plane_state->dc_state);
8200
8201                 dc_new_plane_state = dc_create_plane_state(dc);
8202                 if (!dc_new_plane_state)
8203                         return -ENOMEM;
8204
8205                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8206                                 plane->base.id, new_plane_crtc->base.id);
8207
8208                 ret = fill_dc_plane_attributes(
8209                         new_plane_crtc->dev->dev_private,
8210                         dc_new_plane_state,
8211                         new_plane_state,
8212                         new_crtc_state);
8213                 if (ret) {
8214                         dc_plane_state_release(dc_new_plane_state);
8215                         return ret;
8216                 }
8217
8218                 ret = dm_atomic_get_state(state, &dm_state);
8219                 if (ret) {
8220                         dc_plane_state_release(dc_new_plane_state);
8221                         return ret;
8222                 }
8223
8224                 /*
8225                  * Any atomic check errors that occur after this will
8226                  * not need a release. The plane state will be attached
8227                  * to the stream, and therefore part of the atomic
8228                  * state. It'll be released when the atomic state is
8229                  * cleaned.
8230                  */
8231                 if (!dc_add_plane_to_context(
8232                                 dc,
8233                                 dm_new_crtc_state->stream,
8234                                 dc_new_plane_state,
8235                                 dm_state->context)) {
8236
8237                         dc_plane_state_release(dc_new_plane_state);
8238                         return -EINVAL;
8239                 }
8240
8241                 dm_new_plane_state->dc_state = dc_new_plane_state;
8242
8243                 /* Tell DC to do a full surface update every time there
8244                  * is a plane change. Inefficient, but works for now.
8245                  */
8246                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8247
8248                 *lock_and_validation_needed = true;
8249         }
8250
8252         return ret;
8253 }
8254
8255 static int
8256 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8257                                     struct drm_atomic_state *state,
8258                                     enum surface_update_type *out_type)
8259 {
8260         struct dc *dc = dm->dc;
8261         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8262         int i, j, num_plane, ret = 0;
8263         struct drm_plane_state *old_plane_state, *new_plane_state;
8264         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8265         struct drm_crtc *new_plane_crtc;
8266         struct drm_plane *plane;
8267
8268         struct drm_crtc *crtc;
8269         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8270         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8271         struct dc_stream_status *status = NULL;
8272         enum surface_update_type update_type = UPDATE_TYPE_FAST;
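              /*
               * Start from the cheapest update class; anything classified
               * above UPDATE_TYPE_FAST later forces amdgpu_dm_atomic_check()
               * to take the global DC lock and run full validation.
               */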
8273         struct surface_info_bundle {
8274                 struct dc_surface_update surface_updates[MAX_SURFACES];
8275                 struct dc_plane_info plane_infos[MAX_SURFACES];
8276                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8277                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8278                 struct dc_stream_update stream_update;
8279         } *bundle;
8280
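              /*
               * The bundle aggregates MAX_SURFACES copies of several DC info
               * structs, which is too large for the kernel stack, hence the
               * heap allocation.
               */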
8281         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8282
8283         if (!bundle) {
8284                 DRM_ERROR("Failed to allocate update bundle\n");
8285                 /* Set type to FULL to avoid crashing in DC */
8286                 update_type = UPDATE_TYPE_FULL;
8287                 goto cleanup;
8288         }
8289
8290         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8291
8292                 memset(bundle, 0, sizeof(*bundle));
8293
8294                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8295                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8296                 num_plane = 0;
8297
8298                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8299                         update_type = UPDATE_TYPE_FULL;
8300                         goto cleanup;
8301                 }
8302
8303                 if (!new_dm_crtc_state->stream)
8304                         continue;
8305
8306                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8307                         const struct amdgpu_framebuffer *amdgpu_fb =
8308                                 to_amdgpu_framebuffer(new_plane_state->fb);
8309                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8310                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8311                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8312                         uint64_t tiling_flags;
8313                         bool tmz_surface = false;
8314
8315                         new_plane_crtc = new_plane_state->crtc;
8316                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8317                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8318
8319                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8320                                 continue;
8321
8322                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8323                                 update_type = UPDATE_TYPE_FULL;
8324                                 goto cleanup;
8325                         }
8326
8327                         if (crtc != new_plane_crtc)
8328                                 continue;
8329
8330                         bundle->surface_updates[num_plane].surface =
8331                                         new_dm_plane_state->dc_state;
8332
8333                         if (new_crtc_state->mode_changed) {
8334                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8335                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8336                         }
8337
8338                         if (new_crtc_state->color_mgmt_changed) {
8339                                 bundle->surface_updates[num_plane].gamma =
8340                                                 new_dm_plane_state->dc_state->gamma_correction;
8341                                 bundle->surface_updates[num_plane].in_transfer_func =
8342                                                 new_dm_plane_state->dc_state->in_transfer_func;
8343                                 bundle->surface_updates[num_plane].gamut_remap_matrix =
8344                                                 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8345                                 bundle->stream_update.gamut_remap =
8346                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8347                                 bundle->stream_update.output_csc_transform =
8348                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8349                                 bundle->stream_update.out_transfer_func =
8350                                                 new_dm_crtc_state->stream->out_transfer_func;
8351                         }
8352
8353                         ret = fill_dc_scaling_info(new_plane_state,
8354                                                    scaling_info);
8355                         if (ret)
8356                                 goto cleanup;
8357
8358                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8359
8360                         if (amdgpu_fb) {
8361                                 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8362                                 if (ret)
8363                                         goto cleanup;
8364
8365                                 ret = fill_dc_plane_info_and_addr(
8366                                         dm->adev, new_plane_state, tiling_flags,
8367                                         plane_info,
8368                                         &flip_addr->address, tmz_surface,
8369                                         false);
8370                                 if (ret)
8371                                         goto cleanup;
8372
8373                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8374                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8375                         }
8376
8377                         num_plane++;
8378                 }
8379
8380                 if (num_plane == 0)
8381                         continue;
8382
8383                 ret = dm_atomic_get_state(state, &dm_state);
8384                 if (ret)
8385                         goto cleanup;
8386
8387                 old_dm_state = dm_atomic_get_old_state(state);
8388                 if (!old_dm_state) {
8389                         ret = -EINVAL;
8390                         goto cleanup;
8391                 }
8392
8393                 status = dc_stream_get_status_from_state(old_dm_state->context,
8394                                                          new_dm_crtc_state->stream);
8395                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8396                 /*
8397                  * TODO: DC modifies the surface during this call so we need
8398                  * to lock here - find a way to do this without locking.
8399                  */
8400                 mutex_lock(&dm->dc_lock);
8401                 update_type = dc_check_update_surfaces_for_stream(
8402                                 dc, bundle->surface_updates, num_plane,
8403                                 &bundle->stream_update, status);
8404                 mutex_unlock(&dm->dc_lock);
8405
8406                 if (update_type > UPDATE_TYPE_MED) {
8407                         update_type = UPDATE_TYPE_FULL;
8408                         goto cleanup;
8409                 }
8410         }
8411
8412 cleanup:
8413         kfree(bundle);
8414
8415         *out_type = update_type;
8416         return ret;
8417 }
8418
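     /*
      * A modeset on one CRTC of a DSC-capable MST topology can redistribute
      * link bandwidth and thereby change the DSC configuration of sibling
      * streams, so every CRTC sharing the MST manager has to be pulled into
      * the atomic state.
      */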
8419 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8420 {
8421         struct drm_connector *connector;
8422         struct drm_connector_state *conn_state;
8423         struct amdgpu_dm_connector *aconnector = NULL;
8424         int i;
8425         for_each_new_connector_in_state(state, connector, conn_state, i) {
8426                 if (conn_state->crtc != crtc)
8427                         continue;
8428
8429                 aconnector = to_amdgpu_dm_connector(connector);
8430                 if (!aconnector->port || !aconnector->mst_port)
8431                         aconnector = NULL;
8432                 else
8433                         break;
8434         }
8435
8436         if (!aconnector)
8437                 return 0;
8438
8439         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8440 }
8441
8442 /**
8443  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
8444  * @dev: The DRM device
8445  * @state: The atomic state to commit
8446  *
8447  * Validate that the given atomic state is programmable by DC into hardware.
8448  * This involves constructing a &struct dc_state reflecting the new hardware
8449  * state we wish to commit, then querying DC to see if it is programmable. It's
8450  * important not to modify the existing DC state. Otherwise, atomic_check
8451  * may unexpectedly commit hardware changes.
8452  *
8453  * When validating the DC state, it's important that the right locks are
8454  * acquired. For a full update, which removes/adds/updates streams on one
8455  * CRTC while flipping on another, acquiring the global lock guarantees that
8456  * any such full update commit will wait for completion of any outstanding
8457  * flips using DRM's synchronization events. See
8458  * dm_determine_update_type_for_commit().
8459  *
8460  * Note that DM adds the affected connectors for all CRTCs in state, when that
8461  * might not seem necessary. This is because DC stream creation requires the
8462  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8463  * be possible but non-trivial - a possible TODO item.
8464  *
8465  * Return: 0 on success, or a negative error code if validation failed.
8466  */
8467 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8468                                   struct drm_atomic_state *state)
8469 {
8470         struct amdgpu_device *adev = dev->dev_private;
8471         struct dm_atomic_state *dm_state = NULL;
8472         struct dc *dc = adev->dm.dc;
8473         struct drm_connector *connector;
8474         struct drm_connector_state *old_con_state, *new_con_state;
8475         struct drm_crtc *crtc;
8476         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8477         struct drm_plane *plane;
8478         struct drm_plane_state *old_plane_state, *new_plane_state;
8479         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8480         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8481
8482         int ret, i;
8483
8484         /*
8485          * This bool will be set to true for any modeset/reset
8486          * or plane update that implies a non-fast surface update.
8487          */
8488         bool lock_and_validation_needed = false;
8489
8490         ret = drm_atomic_helper_check_modeset(dev, state);
8491         if (ret)
8492                 goto fail;
8493
8494         if (adev->asic_type >= CHIP_NAVI10) {
8495                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8496                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8497                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8498                                 if (ret)
8499                                         goto fail;
8500                         }
8501                 }
8502         }
8503
8504         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8505                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8506                     !new_crtc_state->color_mgmt_changed &&
8507                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8508                         continue;
8509
8510                 if (!new_crtc_state->enable)
8511                         continue;
8512
8513                 ret = drm_atomic_add_affected_connectors(state, crtc);
8514                 if (ret)
8515                         goto fail;
8516
8517                 ret = drm_atomic_add_affected_planes(state, crtc);
8518                 if (ret)
8519                         goto fail;
8520         }
8521
8522         /*
8523          * Add all primary and overlay planes on the CRTC to the state
8524          * whenever a plane is enabled to maintain correct z-ordering
8525          * and to enable fast surface updates.
8526          */
8527         drm_for_each_crtc(crtc, dev) {
8528                 bool modified = false;
8529
8530                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8531                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8532                                 continue;
8533
8534                         if (new_plane_state->crtc == crtc ||
8535                             old_plane_state->crtc == crtc) {
8536                                 modified = true;
8537                                 break;
8538                         }
8539                 }
8540
8541                 if (!modified)
8542                         continue;
8543
8544                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8545                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8546                                 continue;
8547
8548                         new_plane_state =
8549                                 drm_atomic_get_plane_state(state, plane);
8550
8551                         if (IS_ERR(new_plane_state)) {
8552                                 ret = PTR_ERR(new_plane_state);
8553                                 goto fail;
8554                         }
8555                 }
8556         }
8557
8558         /* Remove existing planes if they are modified */
8559         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8560                 ret = dm_update_plane_state(dc, state, plane,
8561                                             old_plane_state,
8562                                             new_plane_state,
8563                                             false,
8564                                             &lock_and_validation_needed);
8565                 if (ret)
8566                         goto fail;
8567         }
8568
8569         /* Disable all crtcs which require disable */
8570         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8571                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8572                                            old_crtc_state,
8573                                            new_crtc_state,
8574                                            false,
8575                                            &lock_and_validation_needed);
8576                 if (ret)
8577                         goto fail;
8578         }
8579
8580         /* Enable all crtcs which require enable */
8581         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8582                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8583                                            old_crtc_state,
8584                                            new_crtc_state,
8585                                            true,
8586                                            &lock_and_validation_needed);
8587                 if (ret)
8588                         goto fail;
8589         }
8590
8591         /* Add new/modified planes */
8592         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8593                 ret = dm_update_plane_state(dc, state, plane,
8594                                             old_plane_state,
8595                                             new_plane_state,
8596                                             true,
8597                                             &lock_and_validation_needed);
8598                 if (ret)
8599                         goto fail;
8600         }
8601
8602         /* Run this here since we want to validate the streams we created */
8603         ret = drm_atomic_helper_check_planes(dev, state);
8604         if (ret)
8605                 goto fail;
8606
8607         if (state->legacy_cursor_update) {
8608                 /*
8609                  * This is a fast cursor update coming from the plane update
8610                  * helper, check if it can be done asynchronously for better
8611                  * performance.
8612                  */
8613                 state->async_update =
8614                         !drm_atomic_helper_async_check(dev, state);
8615
8616                 /*
8617                  * Skip the remaining global validation if this is an async
8618                  * update. Cursor updates can be done without affecting
8619                  * state or bandwidth calcs and this avoids the performance
8620                  * penalty of locking the private state object and
8621                  * allocating a new dc_state.
8622                  */
8623                 if (state->async_update)
8624                         return 0;
8625         }
8626
8627         /* Check scaling and underscan changes */
8628         /* TODO: Scaling-change validation was removed due to the inability
8629          * to commit a new stream into the context without causing a full
8630          * reset. Need to decide how to handle this.
8631          */
8632         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8633                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8634                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8635                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8636
8637                 /* Skip any modesets/resets */
8638                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8639                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8640                         continue;
8641
8642                 /* Skip anything that is not a scaling or underscan change */
8643                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8644                         continue;
8645
8646                 overall_update_type = UPDATE_TYPE_FULL;
8647                 lock_and_validation_needed = true;
8648         }
8649
8650         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8651         if (ret)
8652                 goto fail;
8653
8654         if (overall_update_type < update_type)
8655                 overall_update_type = update_type;
8656
8657         /*
8658          * lock_and_validation_needed was an old way to determine whether the
8659          * global lock must be taken. Leave it in to catch broken corner cases:
8660          * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8661          * lock_and_validation_needed false = UPDATE_TYPE_FAST
8662          */
8663         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8664                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
8665
8666         if (overall_update_type > UPDATE_TYPE_FAST) {
8667                 ret = dm_atomic_get_state(state, &dm_state);
8668                 if (ret)
8669                         goto fail;
8670
8671                 ret = do_aquire_global_lock(dev, state);
8672                 if (ret)
8673                         goto fail;
8674
8675 #if defined(CONFIG_DRM_AMD_DC_DCN)
8676                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                             ret = -EINVAL;
8677                         goto fail;
                     }
8678
8679                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8680                 if (ret)
8681                         goto fail;
8682 #endif
8683
8684                 /*
8685                  * Perform validation of MST topology in the state:
8686                  * We need to perform MST atomic check before calling
8687                  * dc_validate_global_state(), or there is a chance
8688                  * to get stuck in an infinite loop and hang eventually.
8689                  */
8690                 ret = drm_dp_mst_atomic_check(state);
8691                 if (ret)
8692                         goto fail;
8693
8694                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8695                         ret = -EINVAL;
8696                         goto fail;
8697                 }
8698         } else {
8699                 /*
8700                  * The commit is a fast update. Fast updates shouldn't change
8701                  * the DC context, affect global validation, and can have their
8702                  * commit work done in parallel with other commits not touching
8703                  * the same resource. If we have a new DC context as part of
8704                  * the DM atomic state from validation we need to free it and
8705                  * retain the existing one instead.
8706                  */
8707                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8708
8709                 new_dm_state = dm_atomic_get_new_state(state);
8710                 old_dm_state = dm_atomic_get_old_state(state);
8711
8712                 if (new_dm_state && old_dm_state) {
8713                         if (new_dm_state->context)
8714                                 dc_release_state(new_dm_state->context);
8715
8716                         new_dm_state->context = old_dm_state->context;
8717
8718                         if (old_dm_state->context)
8719                                 dc_retain_state(old_dm_state->context);
8720                 }
8721         }
8722
8723         /* Store the overall update type for use later in atomic check. */
8724         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8725                 struct dm_crtc_state *dm_new_crtc_state =
8726                         to_dm_crtc_state(new_crtc_state);
8727
8728                 dm_new_crtc_state->update_type = (int)overall_update_type;
8729         }
8730
8731         /* Must be success */
8732         WARN_ON(ret);
8733         return ret;
8734
8735 fail:
8736         if (ret == -EDEADLK)
8737                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8738         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8739                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8740         else
8741                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8742
8743         return ret;
8744 }
8745
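     /*
      * DP_MSA_TIMING_PAR_IGNORED in the DP_DOWN_STREAM_PORT_COUNT DPCD
      * register indicates that the sink can ignore the MSA video timing
      * parameters, a prerequisite for driving variable-refresh (FreeSync)
      * timings over DP.
      */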
8746 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8747                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8748 {
8749         uint8_t dpcd_data;
8750         bool capable = false;
8751
8752         if (amdgpu_dm_connector->dc_link &&
8753                 dm_helpers_dp_read_dpcd(
8754                                 NULL,
8755                                 amdgpu_dm_connector->dc_link,
8756                                 DP_DOWN_STREAM_PORT_COUNT,
8757                                 &dpcd_data,
8758                                 sizeof(dpcd_data))) {
8759                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
8760         }
8761
8762         return capable;
8763 }

8764 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8765                                         struct edid *edid)
8766 {
8767         int i;
8768         bool edid_check_required;
8769         struct detailed_timing *timing;
8770         struct detailed_non_pixel *data;
8771         struct detailed_data_monitor_range *range;
8772         struct amdgpu_dm_connector *amdgpu_dm_connector =
8773                         to_amdgpu_dm_connector(connector);
8774         struct dm_connector_state *dm_con_state = NULL;
8775
8776         struct drm_device *dev = connector->dev;
8777         struct amdgpu_device *adev = dev->dev_private;
8778         bool freesync_capable = false;
8779
8780         if (!connector->state) {
8781                 DRM_ERROR("%s - Connector has no state\n", __func__);
8782                 goto update;
8783         }
8784
8785         if (!edid) {
8786                 dm_con_state = to_dm_connector_state(connector->state);
8787
8788                 amdgpu_dm_connector->min_vfreq = 0;
8789                 amdgpu_dm_connector->max_vfreq = 0;
8790                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8791
8792                 goto update;
8793         }
8794
8795         dm_con_state = to_dm_connector_state(connector->state);
8796
8797         edid_check_required = false;
8798         if (!amdgpu_dm_connector->dc_sink) {
8799                 DRM_ERROR("dc_sink is NULL, could not add FreeSync module.\n");
8800                 goto update;
8801         }
8802         if (!adev->dm.freesync_module)
8803                 goto update;
8804         /*
8805          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
8806          */
8807         if (edid) {
8808                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8809                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8810                         edid_check_required = is_dp_capable_without_timing_msa(
8811                                                 adev->dm.dc,
8812                                                 amdgpu_dm_connector);
8813                 }
8814         }
8815         if (edid_check_required && (edid->version > 1 ||
8816            (edid->version == 1 && edid->revision > 1))) {
8817                 for (i = 0; i < 4; i++) {
8819                         timing  = &edid->detailed_timings[i];
8820                         data    = &timing->data.other_data;
8821                         range   = &data->data.range;
8822                         /*
8823                          * Check whether the monitor has a continuous frequency mode.
8824                          */
8825                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8826                                 continue;
8827                         /*
8828                          * Check for the range-limits flag only. If flags == 1,
8829                          * no additional timing information is provided.
8830                          * Default GTF, the secondary GTF curve and CVT are not
8831                          * supported.
8832                          */
8833                         if (range->flags != 1)
8834                                 continue;
8835
8836                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8837                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8838                         amdgpu_dm_connector->pixel_clock_mhz =
8839                                 range->pixel_clock_mhz * 10;
8840                         break;
8841                 }
8842
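                      /*
                       * Example: a panel advertising a continuous 40-75 Hz
                       * range yields min_vfreq = 40 and max_vfreq = 75; the
                       * span exceeds 10 Hz, so the connector is reported as
                       * FreeSync capable below.
                       */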
8843                 if (amdgpu_dm_connector->max_vfreq -
8844                     amdgpu_dm_connector->min_vfreq > 10)
8846                         freesync_capable = true;
8848         }
8849
8850 update:
8851         if (dm_con_state)
8852                 dm_con_state->freesync_capable = freesync_capable;
8853
8854         if (connector->vrr_capable_property)
8855                 drm_connector_set_vrr_capable_property(connector,
8856                                                        freesync_capable);
8857 }
8858
8859 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8860 {
8861         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8862
8863         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8864                 return;
8865         if (link->type == dc_connection_none)
8866                 return;
8867         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8868                                         dpcd_data, sizeof(dpcd_data))) {
8869                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8870
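                      /*
                       * dpcd_data[0] holds the raw DP_PSR_SUPPORT value
                       * (DPCD 0x070): 0 means no PSR support; any non-zero
                       * PSR version (PSR1 or PSR2) is treated as version 1
                       * by DM here.
                       */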
8871                 if (dpcd_data[0] == 0) {
8872                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8873                         link->psr_settings.psr_feature_enabled = false;
8874                 } else {
8875                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8876                         link->psr_settings.psr_feature_enabled = true;
8877                 }
8878
8879                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8880         }
8881 }
8882
8883 /*
8884  * amdgpu_dm_link_setup_psr() - configure the PSR link
8885  * @stream: stream state
8886  *
8887  * Return: true on success
8888  */
8889 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8890 {
8891         struct dc_link *link = NULL;
8892         struct psr_config psr_config = {0};
8893         struct psr_context psr_context = {0};
8894         bool ret = false;
8895
8896         if (!stream)
8897                 return false;
8898
8899         link = stream->link;
8900
8901         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8902
8903         if (psr_config.psr_version > 0) {
8904                 psr_config.psr_exit_link_training_required = 0x1;
8905                 psr_config.psr_frame_capture_indication_req = 0;
8906                 psr_config.psr_rfb_setup_time = 0x37;
8907                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8908                 psr_config.allow_smu_optimizations = 0x0;
8909
8910                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8912         }
8913         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8914
8915         return ret;
8916 }
8917
8918 /*
8919  * amdgpu_dm_psr_enable() - enable the PSR firmware
8920  * @stream: stream state
8921  *
8922  * Return: true on success
8923  */
8924 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8925 {
8926         struct dc_link *link = stream->link;
8927         unsigned int vsync_rate_hz = 0;
8928         struct dc_static_screen_params params = {0};
8929         /*
8930          * Calculate the number of static frames before generating an
8931          * interrupt to enter PSR; start from a fail-safe of 2 frames.
8932          */
8933         unsigned int num_frames_static = 2;
8934
8935         DRM_DEBUG_DRIVER("Enabling psr...\n");
8936
8937         vsync_rate_hz = div64_u64(div64_u64(stream->timing.pix_clk_100hz * 100,
8938                                             stream->timing.v_total),
8939                                   stream->timing.h_total);
8941
8942         /*
8943          * Round up: calculate the number of frames such that at least
8944          * 30 ms of time has passed.
8945          */
8946         if (vsync_rate_hz != 0) {
8947                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8948                 num_frames_static = (30000 / frame_time_microsec) + 1;
8949         }
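              /*
               * Worked example: a 1080p60 stream has pix_clk_100hz = 1485000,
               * h_total = 2200 and v_total = 1125, giving vsync_rate_hz = 60,
               * frame_time_microsec = 16666 and
               * num_frames_static = 30000 / 16666 + 1 = 2.
               */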
8950
8951         params.triggers.cursor_update = true;
8952         params.triggers.overlay_update = true;
8953         params.triggers.surface_update = true;
8954         params.num_frames = num_frames_static;
8955
8956         dc_stream_set_static_screen_params(link->ctx->dc,
8957                                            &stream, 1,
8958                                            &params);
8959
8960         return dc_link_set_psr_allow_active(link, true, false);
8961 }
8962
8963 /*
8964  * amdgpu_dm_psr_disable() - disable the PSR firmware
8965  * @stream: stream state
8966  *
8967  * Return: true on success
8968  */
8969 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8970 {
8972         DRM_DEBUG_DRIVER("Disabling psr...\n");
8973
8974         return dc_link_set_psr_allow_active(stream->link, false, true);
8975 }