/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
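
/*
 * The DMUB firmware image is wrapped by a PSP header and footer; both are
 * subtracted from inst_const_bytes when the inst_const region is sized and
 * copied (see dm_dmub_sw_init() and dm_dmub_hw_init() below).
 */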

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to read the counter from
 *
 * Return: the vblank counter of the CRTC's stream, or 0 if the index is out
 * of range or no stream is attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

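/**
 * dm_crtc_get_scanoutpos() - Read back the current scanout position
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to query
 * @vbl: vblank start in the low 16 bits, vblank end in the high 16 bits
 * @position: vertical position in the low 16 bits, horizontal in the high
 *
 * Return: 0 on success (also when no stream is attached), -EINVAL if the
 * CRTC index is out of range.
 */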
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

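/* True while the variable-refresh-rate logic is active, whether currently
 * running at a variable rate or temporarily pinned to a fixed rate.
 */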
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

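/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, runs after the end of the front-porch to perform the core
 * vblank handling that dm_crtc_high_irq() defers, and to do below-the-range
 * (BTR) processing on pre-DCE12 ASICs.
 */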
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after the end of
                 * front-porch in vrr mode, as vblank timestamping only gives
                 * valid results once scanout is past the front-porch. This
                 * will also deliver page-flip completion events that have
                 * been queued to us if a pageflip happened inside front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        acrtc_state = to_dm_crtc_state(acrtc->base.state);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                         amdgpu_dm_vrr_active(acrtc_state),
                         acrtc_state->active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!amdgpu_dm_vrr_active(acrtc_state))
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc_state->stream,
                                             &acrtc_state->vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
                                           &acrtc_state->vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCN HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

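/* Register DM as an audio component so the HDA driver can query ELD and
 * connection state for each audio pin exposed by the DC resource pool.
 */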
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

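/**
 * dm_dmub_hw_init() - Bring up the DMUB microcontroller
 * @adev: amdgpu device
 *
 * Copies the DMUB firmware sections and the VBIOS into their framebuffer
 * windows, clears the mailbox, tracebuffer and firmware-state regions,
 * programs the hardware parameters and waits for the firmware to auto-load.
 * A no-op on ASICs without DMUB support.
 *
 * Return: 0 on success (or when DMUB is unsupported), negative errno
 * otherwise.
 */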
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

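/*
 * Create and initialize the display manager: DM IRQ handling, the DC core,
 * DMUB, the freesync and color management modules, HDCP (when enabled) and
 * the DRM-side software state.
 */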
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against the error path where dc_create() failed. */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

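/*
 * Request and validate DMCU firmware for the ASICs that need it (Picasso,
 * Raven2 and Navi12 here); for all other supported ASICs this is a no-op.
 * A missing firmware file is not treated as a fatal error.
 */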
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
#endif
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

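/*
 * Software-side DMUB setup: request and validate the firmware, create the
 * DMUB service, size its memory regions and back them with a VRAM
 * allocation. The hardware itself is brought up later in dm_dmub_hw_init().
 */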
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
#endif
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Read the firmware version before it is logged below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        release_firmware(adev->dm.dmub_fw);
        adev->dm.dmub_fw = NULL;

        release_firmware(adev->dm.fw_dmcu);
        adev->dm.fw_dmcu = NULL;

        return 0;
}

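/* Start MST topology management on every connector with an MST link. */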
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

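/*
 * Late init: program the DMCU IRAM with ABM backlight parameters when DMCU
 * firmware is in use, then detect MST links on all connectors.
 */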
static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret;

        if (!adev->dm.fw_dmcu)
                return detect_mst_link_for_all_connectors(adev->ddev);

        dmcu = adev->dm.dc->res_pool->dmcu;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction; don't allow below 1%
         * (0xFFFF * 0.01 = 0x28F).
         */
        params.min_abm_backlight = 0x28F;

        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}

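/*
 * Suspend or resume the MST topology manager of every MST root connector.
 * If a manager fails to resume, tear down its topology and request a
 * hotplug event so userspace can repopulate the display configuration.
 */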
1397 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1398 {
1399         struct amdgpu_dm_connector *aconnector;
1400         struct drm_connector *connector;
1401         struct drm_connector_list_iter iter;
1402         struct drm_dp_mst_topology_mgr *mgr;
1403         int ret;
1404         bool need_hotplug = false;
1405
1406         drm_connector_list_iter_begin(dev, &iter);
1407         drm_for_each_connector_iter(connector, &iter) {
1408                 aconnector = to_amdgpu_dm_connector(connector);
1409                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1410                     aconnector->mst_port)
1411                         continue;
1412
1413                 mgr = &aconnector->mst_mgr;
1414
1415                 if (suspend) {
1416                         drm_dp_mst_topology_mgr_suspend(mgr);
1417                 } else {
1418                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1419                         if (ret < 0) {
1420                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1421                                 need_hotplug = true;
1422                         }
1423                 }
1424         }
1425         drm_connector_list_iter_end(&iter);
1426
1427         if (need_hotplug)
1428                 drm_kms_helper_hotplug_event(dev);
1429 }
1430
1431 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1432 {
1433         struct smu_context *smu = &adev->smu;
1434         int ret = 0;
1435
1436         if (!is_support_sw_smu(adev))
1437                 return 0;
1438
1439         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1440          * depends on the Windows driver dc implementation.
1441          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1442          * should be passed to smu during boot up and on resume from s3.
1443          * boot up: dc calculates dcn watermark clock settings within dc_create,
1444          * dcn20_resource_construct,
1445          * then calls the pplib functions below to pass the settings to smu:
1446          * smu_set_watermarks_for_clock_ranges
1447          * smu_set_watermarks_table
1448          * navi10_set_watermarks_table
1449          * smu_write_watermarks_table
1450          *
1451          * For Renoir, clock settings of dcn watermarks are also fixed values.
1452          * dc has implemented a different flow for the Windows driver:
1453          * dc_hardware_init / dc_set_power_state
1454          * dcn10_init_hw
1455          * notify_wm_ranges
1456          * set_wm_ranges
1457          * -- Linux
1458          * smu_set_watermarks_for_clock_ranges
1459          * renoir_set_watermarks_table
1460          * smu_write_watermarks_table
1461          *
1462          * For Linux,
1463          * dc_hardware_init -> amdgpu_dm_init
1464          * dc_set_power_state --> dm_resume
1465          *
1466          * Therefore, this function applies to navi10/12/14 but not to
1467          * Renoir.
1468          */
1469         switch (adev->asic_type) {
1470         case CHIP_NAVI10:
1471         case CHIP_NAVI14:
1472         case CHIP_NAVI12:
1473                 break;
1474         default:
1475                 return 0;
1476         }
1477
1478         mutex_lock(&smu->mutex);
1479
1480         /* pass data to smu controller */
1481         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1482                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1483                 ret = smu_write_watermarks_table(smu);
1484
1485                 if (ret) {
1486                         mutex_unlock(&smu->mutex);
1487                         DRM_ERROR("Failed to update WMTABLE!\n");
1488                         return ret;
1489                 }
1490                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1491         }
1492
1493         mutex_unlock(&smu->mutex);
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * dm_hw_init() - Initialize DC device
1500  * @handle: The base driver device containing the amdgpu_dm device.
1501  *
1502  * Initialize the &struct amdgpu_display_manager device. This involves calling
1503  * the initializers of each DM component, then populating the struct with them.
1504  *
1505  * Although the function implies hardware initialization, both hardware and
1506  * software are initialized here. Splitting them out to their relevant init
1507  * hooks is a future TODO item.
1508  *
1509  * Some notable things that are initialized here:
1510  *
1511  * - Display Core, both software and hardware
1512  * - DC modules that we need (freesync and color management)
1513  * - DRM software states
1514  * - Interrupt sources and handlers
1515  * - Vblank support
1516  * - Debug FS entries, if enabled
1517  */
1518 static int dm_hw_init(void *handle)
1519 {
1520         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1521         /* Create DAL display manager */
1522         amdgpu_dm_init(adev);
1523         amdgpu_dm_hpd_init(adev);
1524
1525         return 0;
1526 }
1527
1528 /**
1529  * dm_hw_fini() - Teardown DC device
1530  * @handle: The base driver device containing the amdgpu_dm device.
1531  *
1532  * Teardown components within &struct amdgpu_display_manager that require
1533  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1534  * were loaded. Also flush IRQ workqueues and disable them.
1535  */
1536 static int dm_hw_fini(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539
1540         amdgpu_dm_hpd_fini(adev);
1541
1542         amdgpu_dm_irq_fini(adev);
1543         amdgpu_dm_fini(adev);
1544         return 0;
1545 }
1546
1547
1548 static int dm_enable_vblank(struct drm_crtc *crtc);
1549 static void dm_disable_vblank(struct drm_crtc *crtc);
1550
1551 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1552                                  struct dc_state *state, bool enable)
1553 {
1554         enum dc_irq_source irq_source;
1555         struct amdgpu_crtc *acrtc;
1556         int rc = -EBUSY;
1557         int i = 0;
1558
1559         for (i = 0; i < state->stream_count; i++) {
1560                 acrtc = get_crtc_by_otg_inst(
1561                                 adev, state->stream_status[i].primary_otg_inst);
1562
1563                 if (acrtc && state->stream_status[i].plane_count != 0) {
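                        /* The pageflip IRQ source is per-OTG: instance N uses
                         * IRQ_TYPE_PFLIP + N, as computed below.
                         */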
1564                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1565                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1566                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1567                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1568                         if (rc)
1569                                 DRM_WARN("Failed to %s pflip interrupts\n",
1570                                          enable ? "enable" : "disable");
1571
1572                         if (enable) {
1573                                 rc = dm_enable_vblank(&acrtc->base);
1574                                 if (rc)
1575                                         DRM_WARN("Failed to enable vblank interrupts\n");
1576                         } else {
1577                                 dm_disable_vblank(&acrtc->base);
1578                         }
1579
1580                 }
1581         }
1582
1583 }
1584
1585 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1586 {
1587         struct dc_state *context = NULL;
1588         enum dc_status res = DC_ERROR_UNEXPECTED;
1589         int i;
1590         struct dc_stream_state *del_streams[MAX_PIPES];
1591         int del_streams_count = 0;
1592
1593         memset(del_streams, 0, sizeof(del_streams));
1594
1595         context = dc_create_state(dc);
1596         if (context == NULL)
1597                 goto context_alloc_fail;
1598
1599         dc_resource_state_copy_construct_current(dc, context);
1600
1601         /* First, collect all streams currently in the context */
1602         for (i = 0; i < context->stream_count; i++) {
1603                 struct dc_stream_state *stream = context->streams[i];
1604
1605                 del_streams[del_streams_count++] = stream;
1606         }
1607
1608         /* Remove all planes for removed streams and then remove the streams */
1609         for (i = 0; i < del_streams_count; i++) {
1610                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1611                         res = DC_FAIL_DETACH_SURFACES;
1612                         goto fail;
1613                 }
1614
1615                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1616                 if (res != DC_OK)
1617                         goto fail;
1618         }
1619
1620
1621         res = dc_validate_global_state(dc, context, false);
1622
1623         if (res != DC_OK) {
1624                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1625                 goto fail;
1626         }
1627
1628         res = dc_commit_state(dc, context);
1629
1630 fail:
1631         dc_release_state(context);
1632
1633 context_alloc_fail:
1634         return res;
1635 }
1636
1637 static int dm_suspend(void *handle)
1638 {
1639         struct amdgpu_device *adev = handle;
1640         struct amdgpu_display_manager *dm = &adev->dm;
1641         int ret = 0;
1642
1643         if (adev->in_gpu_reset) {
1644                 mutex_lock(&dm->dc_lock);
1645                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1646
1647                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1648
1649                 amdgpu_dm_commit_zero_streams(dm->dc);
1650
1651                 amdgpu_dm_irq_suspend(adev);
1652
1653                 return ret;
1654         }
1655
1656         WARN_ON(adev->dm.cached_state);
1657         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1658
1659         s3_handle_mst(adev->ddev, true);
1660
1661         amdgpu_dm_irq_suspend(adev);
1662
1663
1664         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1665
1666         return 0;
1667 }
1668
1669 static struct amdgpu_dm_connector *
1670 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1671                                              struct drm_crtc *crtc)
1672 {
1673         uint32_t i;
1674         struct drm_connector_state *new_con_state;
1675         struct drm_connector *connector;
1676         struct drm_crtc *crtc_from_state;
1677
1678         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1679                 crtc_from_state = new_con_state->crtc;
1680
1681                 if (crtc_from_state == crtc)
1682                         return to_amdgpu_dm_connector(connector);
1683         }
1684
1685         return NULL;
1686 }
1687
1688 static void emulated_link_detect(struct dc_link *link)
1689 {
1690         struct dc_sink_init_data sink_init_data = { 0 };
1691         struct display_sink_capability sink_caps = { 0 };
1692         enum dc_edid_status edid_status;
1693         struct dc_context *dc_ctx = link->ctx;
1694         struct dc_sink *sink = NULL;
1695         struct dc_sink *prev_sink = NULL;
1696
1697         link->type = dc_connection_none;
1698         prev_sink = link->local_sink;
1699
1700         if (prev_sink != NULL)
1701                 dc_sink_retain(prev_sink);
1702
1703         switch (link->connector_signal) {
1704         case SIGNAL_TYPE_HDMI_TYPE_A: {
1705                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1706                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1707                 break;
1708         }
1709
1710         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1711                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1712                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1713                 break;
1714         }
1715
1716         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1717                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1719                 break;
1720         }
1721
1722         case SIGNAL_TYPE_LVDS: {
1723                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1725                 break;
1726         }
1727
1728         case SIGNAL_TYPE_EDP: {
1729                 sink_caps.transaction_type =
1730                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1731                 sink_caps.signal = SIGNAL_TYPE_EDP;
1732                 break;
1733         }
1734
1735         case SIGNAL_TYPE_DISPLAY_PORT: {
1736                 sink_caps.transaction_type =
1737                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1738                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1739                 break;
1740         }
1741
1742         default:
1743                 DC_ERROR("Invalid connector type! signal:%d\n",
1744                         link->connector_signal);
1745                 return;
1746         }
1747
1748         sink_init_data.link = link;
1749         sink_init_data.sink_signal = sink_caps.signal;
1750
1751         sink = dc_sink_create(&sink_init_data);
1752         if (!sink) {
1753                 DC_ERROR("Failed to create sink!\n");
1754                 return;
1755         }
1756
1757         /* dc_sink_create returns a new reference */
1758         link->local_sink = sink;
1759
1760         edid_status = dm_helpers_read_local_edid(
1761                         link->ctx,
1762                         link,
1763                         sink);
1764
1765         if (edid_status != EDID_OK)
1766                 DC_ERROR("Failed to read EDID\n");
1767
1768 }
1769
1770 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1771                                      struct amdgpu_display_manager *dm)
1772 {
1773         struct {
1774                 struct dc_surface_update surface_updates[MAX_SURFACES];
1775                 struct dc_plane_info plane_infos[MAX_SURFACES];
1776                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1777                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1778                 struct dc_stream_update stream_update;
1779         } *bundle;
1780         int k, m;
1781
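        /* The update bundle is too large to live on the kernel stack, so
         * allocate it dynamically.
         */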
1782         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1783
1784         if (!bundle) {
1785                 dm_error("Failed to allocate update bundle\n");
1786                 goto cleanup;
1787         }
1788
1789         for (k = 0; k < dc_state->stream_count; k++) {
1790                 bundle->stream_update.stream = dc_state->streams[k];
1791
1792                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1793                         bundle->surface_updates[m].surface =
1794                                 dc_state->stream_status->plane_states[m];
1795                         bundle->surface_updates[m].surface->force_full_update =
1796                                 true;
1797                 }
1798                 dc_commit_updates_for_stream(
1799                         dm->dc, bundle->surface_updates,
1800                         dc_state->stream_status->plane_count,
1801                         dc_state->streams[k], &bundle->stream_update, dc_state);
1802         }
1803
1804 cleanup:
1805         kfree(bundle);
1806
1807         return;
1808 }
1809
1810 static int dm_resume(void *handle)
1811 {
1812         struct amdgpu_device *adev = handle;
1813         struct drm_device *ddev = adev->ddev;
1814         struct amdgpu_display_manager *dm = &adev->dm;
1815         struct amdgpu_dm_connector *aconnector;
1816         struct drm_connector *connector;
1817         struct drm_connector_list_iter iter;
1818         struct drm_crtc *crtc;
1819         struct drm_crtc_state *new_crtc_state;
1820         struct dm_crtc_state *dm_new_crtc_state;
1821         struct drm_plane *plane;
1822         struct drm_plane_state *new_plane_state;
1823         struct dm_plane_state *dm_new_plane_state;
1824         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1825         enum dc_connection_type new_connection_type = dc_connection_none;
1826         struct dc_state *dc_state;
1827         int i, r, j;
1828
1829         if (adev->in_gpu_reset) {
1830                 dc_state = dm->cached_dc_state;
1831
1832                 r = dm_dmub_hw_init(adev);
1833                 if (r)
1834                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1835
1836                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1837                 dc_resume(dm->dc);
1838
1839                 amdgpu_dm_irq_resume_early(adev);
1840
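                /* Mark every stream and plane in the cached state for a full
                 * update, so the hardware is completely reprogrammed below.
                 */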
1841                 for (i = 0; i < dc_state->stream_count; i++) {
1842                         dc_state->streams[i]->mode_changed = true;
1843                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1844                                 dc_state->stream_status->plane_states[j]->update_flags.raw
1845                                         = 0xffffffff;
1846                         }
1847                 }
1848
1849                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1850
1851                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1852
1853                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1854
1855                 dc_release_state(dm->cached_dc_state);
1856                 dm->cached_dc_state = NULL;
1857
1858                 amdgpu_dm_irq_resume_late(adev);
1859
1860                 mutex_unlock(&dm->dc_lock);
1861
1862                 return 0;
1863         }
1864         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1865         dc_release_state(dm_state->context);
1866         dm_state->context = dc_create_state(dm->dc);
1867         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1868         dc_resource_state_construct(dm->dc, dm_state->context);
1869
1870         /* Before powering on DC we need to re-initialize DMUB. */
1871         r = dm_dmub_hw_init(adev);
1872         if (r)
1873                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1874
1875         /* power on hardware */
1876         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1877
1878         /* program HPD filter */
1879         dc_resume(dm->dc);
1880
1881         /*
1882          * Enable the HPD Rx IRQ early; this must be done before setting the
1883          * mode, as short-pulse interrupts are used for MST.
1884          */
1885         amdgpu_dm_irq_resume_early(adev);
1886
1887         /* On resume we need to rewrite the MSTM control bits to enable MST */
1888         s3_handle_mst(ddev, false);
1889
1890         /* Do detection */
1891         drm_connector_list_iter_begin(ddev, &iter);
1892         drm_for_each_connector_iter(connector, &iter) {
1893                 aconnector = to_amdgpu_dm_connector(connector);
1894
1895                 /*
1896                  * This is the case when traversing through already-created
1897                  * MST connectors; they should be skipped.
1898                  */
1899                 if (aconnector->mst_port)
1900                         continue;
1901
1902                 mutex_lock(&aconnector->hpd_lock);
1903                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1904                         DRM_ERROR("KMS: Failed to detect connector\n");
1905
1906                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1907                         emulated_link_detect(aconnector->dc_link);
1908                 else
1909                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1910
1911                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1912                         aconnector->fake_enable = false;
1913
1914                 if (aconnector->dc_sink)
1915                         dc_sink_release(aconnector->dc_sink);
1916                 aconnector->dc_sink = NULL;
1917                 amdgpu_dm_update_connector_after_detect(aconnector);
1918                 mutex_unlock(&aconnector->hpd_lock);
1919         }
1920         drm_connector_list_iter_end(&iter);
1921
1922         /* Force mode set in atomic commit */
1923         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1924                 new_crtc_state->active_changed = true;
1925
1926         /*
1927          * atomic_check is expected to create the dc states. We need to release
1928          * them here, since they were duplicated as part of the suspend
1929          * procedure.
1930          */
1931         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1932                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1933                 if (dm_new_crtc_state->stream) {
1934                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1935                         dc_stream_release(dm_new_crtc_state->stream);
1936                         dm_new_crtc_state->stream = NULL;
1937                 }
1938         }
1939
1940         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1941                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1942                 if (dm_new_plane_state->dc_state) {
1943                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1944                         dc_plane_state_release(dm_new_plane_state->dc_state);
1945                         dm_new_plane_state->dc_state = NULL;
1946                 }
1947         }
1948
1949         drm_atomic_helper_resume(ddev, dm->cached_state);
1950
1951         dm->cached_state = NULL;
1952
1953         amdgpu_dm_irq_resume_late(adev);
1954
1955         amdgpu_dm_smu_write_watermarks_table(adev);
1956
1957         return 0;
1958 }
1959
1960 /**
1961  * DOC: DM Lifecycle
1962  *
1963  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1964  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1965  * the base driver's device list to be initialized and torn down accordingly.
1966  *
1967  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1968  */
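
/*
 * As a rough illustration (a simplified sketch, not the actual dispatch code,
 * which lives in the amdgpu base driver), the base driver walks its IP blocks
 * and invokes each hook through &struct amd_ip_funcs, roughly as:
 *
 *      for (i = 0; i < adev->num_ip_blocks; i++)
 *              r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 *
 * so DM's dm_hw_init()/dm_hw_fini() above are reached the same way as any
 * other IP block's hooks.
 */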
1969
1970 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1971         .name = "dm",
1972         .early_init = dm_early_init,
1973         .late_init = dm_late_init,
1974         .sw_init = dm_sw_init,
1975         .sw_fini = dm_sw_fini,
1976         .hw_init = dm_hw_init,
1977         .hw_fini = dm_hw_fini,
1978         .suspend = dm_suspend,
1979         .resume = dm_resume,
1980         .is_idle = dm_is_idle,
1981         .wait_for_idle = dm_wait_for_idle,
1982         .check_soft_reset = dm_check_soft_reset,
1983         .soft_reset = dm_soft_reset,
1984         .set_clockgating_state = dm_set_clockgating_state,
1985         .set_powergating_state = dm_set_powergating_state,
1986 };
1987
1988 const struct amdgpu_ip_block_version dm_ip_block =
1989 {
1990         .type = AMD_IP_BLOCK_TYPE_DCE,
1991         .major = 1,
1992         .minor = 0,
1993         .rev = 0,
1994         .funcs = &amdgpu_dm_funcs,
1995 };
1996
1997
1998 /**
1999  * DOC: atomic
2000  *
2001  * *WIP*
2002  */
2003
2004 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2005         .fb_create = amdgpu_display_user_framebuffer_create,
2006         .output_poll_changed = drm_fb_helper_output_poll_changed,
2007         .atomic_check = amdgpu_dm_atomic_check,
2008         .atomic_commit = amdgpu_dm_atomic_commit,
2009 };
2010
2011 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2012         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2013 };
2014
2015 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2016 {
2017         u32 max_cll, min_cll, max, min, q, r;
2018         struct amdgpu_dm_backlight_caps *caps;
2019         struct amdgpu_display_manager *dm;
2020         struct drm_connector *conn_base;
2021         struct amdgpu_device *adev;
2022         static const u8 pre_computed_values[] = {
2023                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2024                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2025
2026         if (!aconnector || !aconnector->dc_link)
2027                 return;
2028
2029         conn_base = &aconnector->base;
2030         adev = conn_base->dev->dev_private;
2031         dm = &adev->dm;
2032         caps = &dm->backlight_caps;
2033         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2034         caps->aux_support = false;
2035         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2036         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2037
2038         if (caps->ext_caps->bits.oled == 1 ||
2039             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2040             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2041                 caps->aux_support = true;
2042
2043         /* From the specification (CTA-861-G), the maximum luminance is
2044          * calculated as:
2045          *      Luminance = 50 * 2**(CV/32)
2046          * where CV is a one-byte value.
2047          * Evaluating this expression directly would require floating-point
2048          * precision; to avoid that complexity, we take advantage of the fact
2049          * that CV is divided by a constant. By Euclid's division algorithm,
2050          * CV can be written as CV = 32*q + r. Substituting this into the
2051          * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2052          * to pre-compute the 32 possible values of 50*2**(r/32). The values
2053          * were generated with the following Ruby line:
2054          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2055          * The results of that expression can be verified against
2056          * pre_computed_values above.
2057          */
2058         q = max_cll >> 5;
2059         r = max_cll % 32;
2060         max = (1 << q) * pre_computed_values[r];
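        /*
         * Worked example (hypothetical input): max_cll = 70 gives q = 2 and
         * r = 6, so max = (1 << 2) * 57 = 228, matching
         * round(50 * 2**(70/32.0)) = 228.
         */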
2061
2062         // min luminance: maxLum * (CV/255)^2 / 100
2063         q = DIV_ROUND_CLOSEST(min_cll, 255);
2064         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2065
2066         caps->aux_max_input_signal = max;
2067         caps->aux_min_input_signal = min;
2068 }
2069
2070 void amdgpu_dm_update_connector_after_detect(
2071                 struct amdgpu_dm_connector *aconnector)
2072 {
2073         struct drm_connector *connector = &aconnector->base;
2074         struct drm_device *dev = connector->dev;
2075         struct dc_sink *sink;
2076
2077         /* MST handled by drm_mst framework */
2078         if (aconnector->mst_mgr.mst_state)
2079                 return;
2080
2081
2082         sink = aconnector->dc_link->local_sink;
2083         if (sink)
2084                 dc_sink_retain(sink);
2085
2086         /*
2087          * The EDID-managed connector gets its first update only in the mode_valid
2088          * hook; the connector sink is then set to a fake or a physical sink,
2089          * depending on the link status. Skip if this was already done during boot.
2090          */
2091         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2092                         && aconnector->dc_em_sink) {
2093
2094                 /*
2095                  * For S3 resume on a headless setup, use the emulated sink to
2096                  * fake a stream, because on resume connector->sink is set to NULL.
2097                  */
2098                 mutex_lock(&dev->mode_config.mutex);
2099
2100                 if (sink) {
2101                         if (aconnector->dc_sink) {
2102                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2103                                 /*
2104                                  * The retain and release below bump up the
2105                                  * refcount for the sink, because the link no longer points
2106                                  * to it after disconnect; otherwise, on the next crtc-to-connector
2107                                  * reshuffle by the UMD we would hit an unwanted dc_sink release.
2108                                  */
2109                                 dc_sink_release(aconnector->dc_sink);
2110                         }
2111                         aconnector->dc_sink = sink;
2112                         dc_sink_retain(aconnector->dc_sink);
2113                         amdgpu_dm_update_freesync_caps(connector,
2114                                         aconnector->edid);
2115                 } else {
2116                         amdgpu_dm_update_freesync_caps(connector, NULL);
2117                         if (!aconnector->dc_sink) {
2118                                 aconnector->dc_sink = aconnector->dc_em_sink;
2119                                 dc_sink_retain(aconnector->dc_sink);
2120                         }
2121                 }
2122
2123                 mutex_unlock(&dev->mode_config.mutex);
2124
2125                 if (sink)
2126                         dc_sink_release(sink);
2127                 return;
2128         }
2129
2130         /*
2131          * TODO: temporary guard while looking for a proper fix.
2132          * If this sink is an MST sink, we should not do anything.
2133          */
2134         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2135                 dc_sink_release(sink);
2136                 return;
2137         }
2138
2139         if (aconnector->dc_sink == sink) {
2140                 /*
2141                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2142                  * Do nothing.
2143                  */
2144                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2145                                 aconnector->connector_id);
2146                 if (sink)
2147                         dc_sink_release(sink);
2148                 return;
2149         }
2150
2151         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2152                 aconnector->connector_id, aconnector->dc_sink, sink);
2153
2154         mutex_lock(&dev->mode_config.mutex);
2155
2156         /*
2157          * 1. Update status of the drm connector
2158          * 2. Send an event and let userspace tell us what to do
2159          */
2160         if (sink) {
2161                 /*
2162                  * TODO: check if we still need the S3 mode update workaround.
2163                  * If yes, put it here.
2164                  */
2165                 if (aconnector->dc_sink)
2166                         amdgpu_dm_update_freesync_caps(connector, NULL);
2167
2168                 aconnector->dc_sink = sink;
2169                 dc_sink_retain(aconnector->dc_sink);
2170                 if (sink->dc_edid.length == 0) {
2171                         aconnector->edid = NULL;
2172                         if (aconnector->dc_link->aux_mode) {
2173                                 drm_dp_cec_unset_edid(
2174                                         &aconnector->dm_dp_aux.aux);
2175                         }
2176                 } else {
2177                         aconnector->edid =
2178                                 (struct edid *)sink->dc_edid.raw_edid;
2179
2180                         drm_connector_update_edid_property(connector,
2181                                                            aconnector->edid);
2182
2183                         if (aconnector->dc_link->aux_mode)
2184                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2185                                                     aconnector->edid);
2186                 }
2187
2188                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2189                 update_connector_ext_caps(aconnector);
2190         } else {
2191                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2192                 amdgpu_dm_update_freesync_caps(connector, NULL);
2193                 drm_connector_update_edid_property(connector, NULL);
2194                 aconnector->num_modes = 0;
2195                 dc_sink_release(aconnector->dc_sink);
2196                 aconnector->dc_sink = NULL;
2197                 aconnector->edid = NULL;
2198 #ifdef CONFIG_DRM_AMD_DC_HDCP
2199                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2200                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2201                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2202 #endif
2203         }
2204
2205         mutex_unlock(&dev->mode_config.mutex);
2206
2207         if (sink)
2208                 dc_sink_release(sink);
2209 }
2210
2211 static void handle_hpd_irq(void *param)
2212 {
2213         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2214         struct drm_connector *connector = &aconnector->base;
2215         struct drm_device *dev = connector->dev;
2216         enum dc_connection_type new_connection_type = dc_connection_none;
2217 #ifdef CONFIG_DRM_AMD_DC_HDCP
2218         struct amdgpu_device *adev = dev->dev_private;
2219 #endif
2220
2221         /*
2222          * In case of failure, or for MST, there is no need to update the connector
2223          * status or notify the OS, since MST handles this in its own context.
2224          */
2225         mutex_lock(&aconnector->hpd_lock);
2226
2227 #ifdef CONFIG_DRM_AMD_DC_HDCP
2228         if (adev->dm.hdcp_workqueue)
2229                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2230 #endif
2231         if (aconnector->fake_enable)
2232                 aconnector->fake_enable = false;
2233
2234         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2235                 DRM_ERROR("KMS: Failed to detect connector\n");
2236
2237         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2238                 emulated_link_detect(aconnector->dc_link);
2239
2240
2241                 drm_modeset_lock_all(dev);
2242                 dm_restore_drm_connector_state(dev, connector);
2243                 drm_modeset_unlock_all(dev);
2244
2245                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2246                         drm_kms_helper_hotplug_event(dev);
2247
2248         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2249                 amdgpu_dm_update_connector_after_detect(aconnector);
2250
2251
2252                 drm_modeset_lock_all(dev);
2253                 dm_restore_drm_connector_state(dev, connector);
2254                 drm_modeset_unlock_all(dev);
2255
2256                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2257                         drm_kms_helper_hotplug_event(dev);
2258         }
2259         mutex_unlock(&aconnector->hpd_lock);
2260
2261 }
2262
2263 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2264 {
2265         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2266         uint8_t dret;
2267         bool new_irq_handled = false;
2268         int dpcd_addr;
2269         int dpcd_bytes_to_read;
2270
2271         const int max_process_count = 30;
2272         int process_count = 0;
2273
2274         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2275
2276         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2277                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2278                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2279                 dpcd_addr = DP_SINK_COUNT;
2280         } else {
2281                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2282                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2283                 dpcd_addr = DP_SINK_COUNT_ESI;
2284         }
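        /* Note that esi[] is sized for the larger ESI read; for DPCD revisions
         * below 1.2 only the first dpcd_bytes_to_read bytes are used.
         */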
2285
2286         dret = drm_dp_dpcd_read(
2287                 &aconnector->dm_dp_aux.aux,
2288                 dpcd_addr,
2289                 esi,
2290                 dpcd_bytes_to_read);
2291
2292         while (dret == dpcd_bytes_to_read &&
2293                 process_count < max_process_count) {
2294                 uint8_t retry;
2295                 dret = 0;
2296
2297                 process_count++;
2298
2299                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2300                 /* handle HPD short pulse irq */
2301                 if (aconnector->mst_mgr.mst_state)
2302                         drm_dp_mst_hpd_irq(
2303                                 &aconnector->mst_mgr,
2304                                 esi,
2305                                 &new_irq_handled);
2306
2307                 if (new_irq_handled) {
2308                         /* ACK at DPCD to notify downstream */
2309                         const int ack_dpcd_bytes_to_write =
2310                                 dpcd_bytes_to_read - 1;
2311
2312                         for (retry = 0; retry < 3; retry++) {
2313                                 uint8_t wret;
2314
2315                                 wret = drm_dp_dpcd_write(
2316                                         &aconnector->dm_dp_aux.aux,
2317                                         dpcd_addr + 1,
2318                                         &esi[1],
2319                                         ack_dpcd_bytes_to_write);
2320                                 if (wret == ack_dpcd_bytes_to_write)
2321                                         break;
2322                         }
2323
2324                         /* check if there is new irq to be handled */
2325                         dret = drm_dp_dpcd_read(
2326                                 &aconnector->dm_dp_aux.aux,
2327                                 dpcd_addr,
2328                                 esi,
2329                                 dpcd_bytes_to_read);
2330
2331                         new_irq_handled = false;
2332                 } else {
2333                         break;
2334                 }
2335         }
2336
2337         if (process_count == max_process_count)
2338                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2339 }
2340
2341 static void handle_hpd_rx_irq(void *param)
2342 {
2343         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2344         struct drm_connector *connector = &aconnector->base;
2345         struct drm_device *dev = connector->dev;
2346         struct dc_link *dc_link = aconnector->dc_link;
2347         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2348         enum dc_connection_type new_connection_type = dc_connection_none;
2349 #ifdef CONFIG_DRM_AMD_DC_HDCP
2350         union hpd_irq_data hpd_irq_data;
2351         struct amdgpu_device *adev = dev->dev_private;
2352
2353         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2354 #endif
2355
2356         /*
2357          * TODO: Temporarily take a mutex so the hpd interrupt does not run
2358          * into a gpio conflict; once an i2c helper is implemented, this
2359          * mutex should be retired.
2360          */
2361         if (dc_link->type != dc_connection_mst_branch)
2362                 mutex_lock(&aconnector->hpd_lock);
2363
2364
2365 #ifdef CONFIG_DRM_AMD_DC_HDCP
2366         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2367 #else
2368         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2369 #endif
2370                         !is_mst_root_connector) {
2371                 /* Downstream Port status changed. */
2372                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2373                         DRM_ERROR("KMS: Failed to detect connector\n");
2374
2375                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2376                         emulated_link_detect(dc_link);
2377
2378                         if (aconnector->fake_enable)
2379                                 aconnector->fake_enable = false;
2380
2381                         amdgpu_dm_update_connector_after_detect(aconnector);
2382
2383
2384                         drm_modeset_lock_all(dev);
2385                         dm_restore_drm_connector_state(dev, connector);
2386                         drm_modeset_unlock_all(dev);
2387
2388                         drm_kms_helper_hotplug_event(dev);
2389                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2390
2391                         if (aconnector->fake_enable)
2392                                 aconnector->fake_enable = false;
2393
2394                         amdgpu_dm_update_connector_after_detect(aconnector);
2395
2396
2397                         drm_modeset_lock_all(dev);
2398                         dm_restore_drm_connector_state(dev, connector);
2399                         drm_modeset_unlock_all(dev);
2400
2401                         drm_kms_helper_hotplug_event(dev);
2402                 }
2403         }
2404 #ifdef CONFIG_DRM_AMD_DC_HDCP
2405         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2406                 if (adev->dm.hdcp_workqueue)
2407                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2408         }
2409 #endif
2410         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2411             (dc_link->type == dc_connection_mst_branch))
2412                 dm_handle_hpd_rx_irq(aconnector);
2413
2414         if (dc_link->type != dc_connection_mst_branch) {
2415                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2416                 mutex_unlock(&aconnector->hpd_lock);
2417         }
2418 }
2419
2420 static void register_hpd_handlers(struct amdgpu_device *adev)
2421 {
2422         struct drm_device *dev = adev->ddev;
2423         struct drm_connector *connector;
2424         struct amdgpu_dm_connector *aconnector;
2425         const struct dc_link *dc_link;
2426         struct dc_interrupt_params int_params = {0};
2427
2428         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2429         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2430
2431         list_for_each_entry(connector,
2432                         &dev->mode_config.connector_list, head) {
2433
2434                 aconnector = to_amdgpu_dm_connector(connector);
2435                 dc_link = aconnector->dc_link;
2436
2437                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2438                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2439                         int_params.irq_source = dc_link->irq_source_hpd;
2440
2441                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2442                                         handle_hpd_irq,
2443                                         (void *) aconnector);
2444                 }
2445
2446                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2447
2448                         /* Also register for DP short pulse (hpd_rx). */
2449                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2450                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2451
2452                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2453                                         handle_hpd_rx_irq,
2454                                         (void *) aconnector);
2455                 }
2456         }
2457 }
2458
2459 /* Register IRQ sources and initialize IRQ callbacks */
2460 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2461 {
2462         struct dc *dc = adev->dm.dc;
2463         struct common_irq_params *c_irq_params;
2464         struct dc_interrupt_params int_params = {0};
2465         int r;
2466         int i;
2467         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2468
2469         if (adev->asic_type >= CHIP_VEGA10)
2470                 client_id = SOC15_IH_CLIENTID_DCE;
2471
2472         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2473         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2474
2475         /*
2476          * Actions of amdgpu_irq_add_id():
2477          * 1. Register a set() function with base driver.
2478          *    Base driver will call set() function to enable/disable an
2479          *    interrupt in DC hardware.
2480          * 2. Register amdgpu_dm_irq_handler().
2481          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2482          *    coming from DC hardware.
2483          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2484          *    for acknowledging and handling. */
2485
2486         /* Use VBLANK interrupt */
2487         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2488                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2489                 if (r) {
2490                         DRM_ERROR("Failed to add crtc irq id!\n");
2491                         return r;
2492                 }
2493
2494                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2495                 int_params.irq_source =
2496                         dc_interrupt_to_irq_source(dc, i, 0);
2497
2498                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2499
2500                 c_irq_params->adev = adev;
2501                 c_irq_params->irq_src = int_params.irq_source;
2502
2503                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2504                                 dm_crtc_high_irq, c_irq_params);
2505         }
2506
2507         /* Use VUPDATE interrupt */
2508         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2509                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2510                 if (r) {
2511                         DRM_ERROR("Failed to add vupdate irq id!\n");
2512                         return r;
2513                 }
2514
2515                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516                 int_params.irq_source =
2517                         dc_interrupt_to_irq_source(dc, i, 0);
2518
2519                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2520
2521                 c_irq_params->adev = adev;
2522                 c_irq_params->irq_src = int_params.irq_source;
2523
2524                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525                                 dm_vupdate_high_irq, c_irq_params);
2526         }
2527
2528         /* Use GRPH_PFLIP interrupt */
2529         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2531                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2532                 if (r) {
2533                         DRM_ERROR("Failed to add page flip irq id!\n");
2534                         return r;
2535                 }
2536
2537                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538                 int_params.irq_source =
2539                         dc_interrupt_to_irq_source(dc, i, 0);
2540
2541                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2542
2543                 c_irq_params->adev = adev;
2544                 c_irq_params->irq_src = int_params.irq_source;
2545
2546                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547                                 dm_pflip_high_irq, c_irq_params);
2548
2549         }
2550
2551         /* HPD */
2552         r = amdgpu_irq_add_id(adev, client_id,
2553                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2554         if (r) {
2555                 DRM_ERROR("Failed to add hpd irq id!\n");
2556                 return r;
2557         }
2558
2559         register_hpd_handlers(adev);
2560
2561         return 0;
2562 }
2563
2564 #if defined(CONFIG_DRM_AMD_DC_DCN)
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2567 {
2568         struct dc *dc = adev->dm.dc;
2569         struct common_irq_params *c_irq_params;
2570         struct dc_interrupt_params int_params = {0};
2571         int r;
2572         int i;
2573
2574         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2575         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2576
2577         /*
2578          * Actions of amdgpu_irq_add_id():
2579          * 1. Register a set() function with base driver.
2580          *    Base driver will call set() function to enable/disable an
2581          *    interrupt in DC hardware.
2582          * 2. Register amdgpu_dm_irq_handler().
2583          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2584          *    coming from DC hardware.
2585          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2586          *    for acknowledging and handling.
2587          */
2588
2589         /* Use VSTARTUP interrupt */
2590         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2591                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2592                         i++) {
2593                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2594
2595                 if (r) {
2596                         DRM_ERROR("Failed to add crtc irq id!\n");
2597                         return r;
2598                 }
2599
2600                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601                 int_params.irq_source =
2602                         dc_interrupt_to_irq_source(dc, i, 0);
2603
2604                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2605
2606                 c_irq_params->adev = adev;
2607                 c_irq_params->irq_src = int_params.irq_source;
2608
2609                 amdgpu_dm_irq_register_interrupt(
2610                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2611         }
2612
2613         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2614          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2615          * to trigger at end of each vblank, regardless of state of the lock,
2616          * matching DCE behaviour.
2617          */
2618         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2619              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2620              i++) {
2621                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2622
2623                 if (r) {
2624                         DRM_ERROR("Failed to add vupdate irq id!\n");
2625                         return r;
2626                 }
2627
2628                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2629                 int_params.irq_source =
2630                         dc_interrupt_to_irq_source(dc, i, 0);
2631
2632                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2633
2634                 c_irq_params->adev = adev;
2635                 c_irq_params->irq_src = int_params.irq_source;
2636
2637                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2638                                 dm_vupdate_high_irq, c_irq_params);
2639         }
2640
2641         /* Use GRPH_PFLIP interrupt */
2642         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2643                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2644                         i++) {
2645                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2646                 if (r) {
2647                         DRM_ERROR("Failed to add page flip irq id!\n");
2648                         return r;
2649                 }
2650
2651                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2652                 int_params.irq_source =
2653                         dc_interrupt_to_irq_source(dc, i, 0);
2654
2655                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2656
2657                 c_irq_params->adev = adev;
2658                 c_irq_params->irq_src = int_params.irq_source;
2659
2660                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2661                                 dm_pflip_high_irq, c_irq_params);
2662
2663         }
2664
2665         /* HPD */
2666         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2667                         &adev->hpd_irq);
2668         if (r) {
2669                 DRM_ERROR("Failed to add hpd irq id!\n");
2670                 return r;
2671         }
2672
2673         register_hpd_handlers(adev);
2674
2675         return 0;
2676 }
2677 #endif
2678
2679 /*
2680  * Acquires the lock for the atomic state object and returns
2681  * the new atomic state.
2682  *
2683  * This should only be called during atomic check.
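 *
 * A minimal usage sketch (hypothetical caller; *dm_state must start out
 * NULL so the state is only looked up once):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *
 *      if (ret)
 *              return ret;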
2684  */
2685 static int dm_atomic_get_state(struct drm_atomic_state *state,
2686                                struct dm_atomic_state **dm_state)
2687 {
2688         struct drm_device *dev = state->dev;
2689         struct amdgpu_device *adev = dev->dev_private;
2690         struct amdgpu_display_manager *dm = &adev->dm;
2691         struct drm_private_state *priv_state;
2692
2693         if (*dm_state)
2694                 return 0;
2695
2696         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2697         if (IS_ERR(priv_state))
2698                 return PTR_ERR(priv_state);
2699
2700         *dm_state = to_dm_atomic_state(priv_state);
2701
2702         return 0;
2703 }
2704
2705 static struct dm_atomic_state *
2706 dm_atomic_get_new_state(struct drm_atomic_state *state)
2707 {
2708         struct drm_device *dev = state->dev;
2709         struct amdgpu_device *adev = dev->dev_private;
2710         struct amdgpu_display_manager *dm = &adev->dm;
2711         struct drm_private_obj *obj;
2712         struct drm_private_state *new_obj_state;
2713         int i;
2714
2715         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2716                 if (obj->funcs == dm->atomic_obj.funcs)
2717                         return to_dm_atomic_state(new_obj_state);
2718         }
2719
2720         return NULL;
2721 }
2722
2723 static struct dm_atomic_state *
2724 dm_atomic_get_old_state(struct drm_atomic_state *state)
2725 {
2726         struct drm_device *dev = state->dev;
2727         struct amdgpu_device *adev = dev->dev_private;
2728         struct amdgpu_display_manager *dm = &adev->dm;
2729         struct drm_private_obj *obj;
2730         struct drm_private_state *old_obj_state;
2731         int i;
2732
2733         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2734                 if (obj->funcs == dm->atomic_obj.funcs)
2735                         return to_dm_atomic_state(old_obj_state);
2736         }
2737
2738         return NULL;
2739 }
2740
2741 static struct drm_private_state *
2742 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2743 {
2744         struct dm_atomic_state *old_state, *new_state;
2745
2746         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2747         if (!new_state)
2748                 return NULL;
2749
2750         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2751
2752         old_state = to_dm_atomic_state(obj->state);
2753
2754         if (old_state && old_state->context)
2755                 new_state->context = dc_copy_state(old_state->context);
2756
2757         if (!new_state->context) {
2758                 kfree(new_state);
2759                 return NULL;
2760         }
2761
2762         return &new_state->base;
2763 }
2764
2765 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2766                                     struct drm_private_state *state)
2767 {
2768         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2769
2770         if (dm_state && dm_state->context)
2771                 dc_release_state(dm_state->context);
2772
2773         kfree(dm_state);
2774 }
2775
2776 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2777         .atomic_duplicate_state = dm_atomic_duplicate_state,
2778         .atomic_destroy_state = dm_atomic_destroy_state,
2779 };
2780
2781 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2782 {
2783         struct dm_atomic_state *state;
2784         int r;
2785
2786         adev->mode_info.mode_config_initialized = true;
2787
2788         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2789         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2790
2791         adev->ddev->mode_config.max_width = 16384;
2792         adev->ddev->mode_config.max_height = 16384;
2793
2794         adev->ddev->mode_config.preferred_depth = 24;
2795         adev->ddev->mode_config.prefer_shadow = 1;
2796         /* indicates support for immediate flip */
2797         adev->ddev->mode_config.async_page_flip = true;
2798
2799         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2800
2801         state = kzalloc(sizeof(*state), GFP_KERNEL);
2802         if (!state)
2803                 return -ENOMEM;
2804
2805         state->context = dc_create_state(adev->dm.dc);
2806         if (!state->context) {
2807                 kfree(state);
2808                 return -ENOMEM;
2809         }
2810
2811         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2812
2813         drm_atomic_private_obj_init(adev->ddev,
2814                                     &adev->dm.atomic_obj,
2815                                     &state->base,
2816                                     &dm_atomic_state_funcs);
2817
2818         r = amdgpu_display_modeset_create_props(adev);
2819         if (r)
2820                 return r;
2821
2822         r = amdgpu_dm_audio_init(adev);
2823         if (r)
2824                 return r;
2825
2826         return 0;
2827 }
2828
2829 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2830 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2831 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2832
2833 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2834         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2835
2836 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2837 {
2838 #if defined(CONFIG_ACPI)
2839         struct amdgpu_dm_backlight_caps caps;
2840
2841         if (dm->backlight_caps.caps_valid)
2842                 return;
2843
2844         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2845         if (caps.caps_valid) {
2846                 dm->backlight_caps.caps_valid = true;
2847                 if (caps.aux_support)
2848                         return;
2849                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2850                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2851         } else {
2852                 dm->backlight_caps.min_input_signal =
2853                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2854                 dm->backlight_caps.max_input_signal =
2855                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2856         }
2857 #else
2858         if (dm->backlight_caps.aux_support)
2859                 return;
2860
2861         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2862         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2863 #endif
2864 }
2865
2866 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2867 {
2868         bool rc;
2869
2870         if (!link)
2871                 return 1;
2872
2873         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2874                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2875
2876         return rc ? 0 : 1;
2877 }
2878
2879 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2880                               const uint32_t user_brightness)
2881 {
2882         u32 min, max, conversion_pace;
2883         u32 brightness = user_brightness;
2884
2885         if (!caps)
2886                 goto out;
2887
2888         if (!caps->aux_support) {
2889                 max = caps->max_input_signal;
2890                 min = caps->min_input_signal;
2891                 /*
2892                  * The brightness input is in the range 0-255
2893                  * It needs to be rescaled to be between the
2894                  * requested min and max input signal
2895                  * It also needs to be scaled up by 0x101 to
2896                  * match the DC interface which has a range of
2897                  * 0 to 0xffff
2898                  */
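                /*
                 * E.g. with the default caps (min = 12, max = 255), a
                 * user_brightness of 255 maps to
                 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0xffff.
                 */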
2899                 conversion_pace = 0x101;
2900                 brightness =
2901                         user_brightness
2902                         * conversion_pace
2903                         * (max - min)
2904                         / AMDGPU_MAX_BL_LEVEL
2905                         + min * conversion_pace;
2906         } else {
2907                 /* TODO
2908                  * We are doing a linear interpolation here, which is OK but
2909                  * does not provide the optimal result. We probably want
2910                  * something close to the Perceptual Quantizer (PQ) curve.
2911                  */
2912                 max = caps->aux_max_input_signal;
2913                 min = caps->aux_min_input_signal;
2914
2915                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2916                                + user_brightness * max;
2917                 // Multiply the value by 1000 since we use millinits
2918                 brightness *= 1000;
2919                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
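                /*
                 * E.g. user_brightness = 255 (full scale) reduces to
                 * max * 1000, i.e. the panel's maximum luminance in
                 * millinits.
                 */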
2920         }
2921
2922 out:
2923         return brightness;
2924 }
2925
2926 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2927 {
2928         struct amdgpu_display_manager *dm = bl_get_data(bd);
2929         struct amdgpu_dm_backlight_caps caps;
2930         struct dc_link *link = NULL;
2931         u32 brightness;
2932         bool rc;
2933
2934         amdgpu_dm_update_backlight_caps(dm);
2935         caps = dm->backlight_caps;
2936
2937         link = (struct dc_link *)dm->backlight_link;
2938
2939         brightness = convert_brightness(&caps, bd->props.brightness);
2940         // Change brightness based on AUX property
2941         if (caps.aux_support)
2942                 return set_backlight_via_aux(link, brightness);
2943
2944         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2945
2946         return rc ? 0 : 1;
2947 }
2948
2949 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2950 {
2951         struct amdgpu_display_manager *dm = bl_get_data(bd);
2952         int ret = dc_link_get_backlight_level(dm->backlight_link);
2953
2954         if (ret == DC_ERROR_UNEXPECTED)
2955                 return bd->props.brightness;
2956         return ret;
2957 }
2958
2959 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2960         .options = BL_CORE_SUSPENDRESUME,
2961         .get_brightness = amdgpu_dm_backlight_get_brightness,
2962         .update_status  = amdgpu_dm_backlight_update_status,
2963 };
2964
2965 static void
2966 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2967 {
2968         char bl_name[16];
2969         struct backlight_properties props = { 0 };
2970
2971         amdgpu_dm_update_backlight_caps(dm);
2972
2973         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2974         props.brightness = AMDGPU_MAX_BL_LEVEL;
2975         props.type = BACKLIGHT_RAW;
2976
2977         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2978                         dm->adev->ddev->primary->index);
2979
2980         dm->backlight_dev = backlight_device_register(bl_name,
2981                         dm->adev->ddev->dev,
2982                         dm,
2983                         &amdgpu_dm_backlight_ops,
2984                         &props);
2985
2986         if (IS_ERR(dm->backlight_dev))
2987                 DRM_ERROR("DM: Backlight registration failed!\n");
2988         else
2989                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2990 }
2991
2992 #endif
2993
2994 static int initialize_plane(struct amdgpu_display_manager *dm,
2995                             struct amdgpu_mode_info *mode_info, int plane_id,
2996                             enum drm_plane_type plane_type,
2997                             const struct dc_plane_cap *plane_cap)
2998 {
2999         struct drm_plane *plane;
3000         unsigned long possible_crtcs;
3001         int ret = 0;
3002
3003         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3004         if (!plane) {
3005                 DRM_ERROR("KMS: Failed to allocate plane\n");
3006                 return -ENOMEM;
3007         }
3008         plane->type = plane_type;
3009
3010         /*
3011          * HACK: IGT tests expect that the primary plane for a CRTC
3012          * can only have one possible CRTC. Expose support for any
3013          * CRTC only to planes that won't be used as a primary plane
3014          * for a CRTC - i.e. overlay or underlay planes.
3015          */
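        /* possible_crtcs is a bitmask: bit N allows use with CRTC index N. */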
3016         possible_crtcs = 1 << plane_id;
3017         if (plane_id >= dm->dc->caps.max_streams)
3018                 possible_crtcs = 0xff;
3019
3020         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3021
3022         if (ret) {
3023                 DRM_ERROR("KMS: Failed to initialize plane\n");
3024                 kfree(plane);
3025                 return ret;
3026         }
3027
3028         if (mode_info)
3029                 mode_info->planes[plane_id] = plane;
3030
3031         return ret;
3032 }
3033
3034
3035 static void register_backlight_device(struct amdgpu_display_manager *dm,
3036                                       struct dc_link *link)
3037 {
3038 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3039         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3040
3041         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3042             link->type != dc_connection_none) {
3043                 /*
3044                  * Even if registration fails, we should continue with
3045                  * DM initialization, because not having backlight control
3046                  * is better than a black screen.
3047                  */
3048                 amdgpu_dm_register_backlight_device(dm);
3049
3050                 if (dm->backlight_dev)
3051                         dm->backlight_link = link;
3052         }
3053 #endif
3054 }
3055
3056
3057 /*
3058  * In this architecture, the association
3059  * connector -> encoder -> crtc
3060  * is not really required. The crtc and connector will hold the
3061  * display_index as an abstraction to use with the DAL component.
3062  *
3063  * Returns 0 on success
3064  */
3065 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3066 {
3067         struct amdgpu_display_manager *dm = &adev->dm;
3068         int32_t i;
3069         struct amdgpu_dm_connector *aconnector = NULL;
3070         struct amdgpu_encoder *aencoder = NULL;
3071         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3072         uint32_t link_cnt;
3073         int32_t primary_planes;
3074         enum dc_connection_type new_connection_type = dc_connection_none;
3075         const struct dc_plane_cap *plane;
3076
3077         link_cnt = dm->dc->caps.max_links;
3078         if (amdgpu_dm_mode_config_init(dm->adev)) {
3079                 DRM_ERROR("DM: Failed to initialize mode config\n");
3080                 return -EINVAL;
3081         }
3082
3083         /* There is one primary plane per CRTC */
3084         primary_planes = dm->dc->caps.max_streams;
3085         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3086
3087         /*
3088          * Initialize primary planes, implicit planes for legacy IOCTLs.
3089          * Order is reversed to match iteration order in atomic check.
3090          */
3091         for (i = (primary_planes - 1); i >= 0; i--) {
3092                 plane = &dm->dc->caps.planes[i];
3093
3094                 if (initialize_plane(dm, mode_info, i,
3095                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3096                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3097                         goto fail;
3098                 }
3099         }
3100
3101         /*
3102          * Initialize overlay planes, index starting after primary planes.
3103          * These planes have a higher DRM index than the primary planes since
3104          * they should be considered as having a higher z-order.
3105          * Order is reversed to match iteration order in atomic check.
3106          *
3107          * Only support DCN for now, and only expose one so we don't encourage
3108          * userspace to use up all the pipes.
3109          */
3110         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3111                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3112
3113                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3114                         continue;
3115
3116                 if (!plane->blends_with_above || !plane->blends_with_below)
3117                         continue;
3118
3119                 if (!plane->pixel_format_support.argb8888)
3120                         continue;
3121
3122                 if (initialize_plane(dm, NULL, primary_planes + i,
3123                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3124                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3125                         goto fail;
3126                 }
3127
3128                 /* Only create one overlay plane. */
3129                 break;
3130         }
3131
3132         for (i = 0; i < dm->dc->caps.max_streams; i++)
3133                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3134                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3135                         goto fail;
3136                 }
3137
3138         dm->display_indexes_num = dm->dc->caps.max_streams;
3139
3140         /* loops over all connectors on the board */
3141         /* Loop over all connectors on the board. */
3142                 struct dc_link *link = NULL;
3143
3144                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3145                         DRM_ERROR(
3146                                 "KMS: Cannot support more than %d display indexes\n",
3147                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3148                         continue;
3149                 }
3150
3151                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3152                 if (!aconnector)
3153                         goto fail;
3154
3155                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3156                 if (!aencoder)
3157                         goto fail;
3158
3159                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3160                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3161                         goto fail;
3162                 }
3163
3164                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3165                         DRM_ERROR("KMS: Failed to initialize connector\n");
3166                         goto fail;
3167                 }
3168
3169                 link = dc_get_link_at_index(dm->dc, i);
3170
3171                 if (!dc_link_detect_sink(link, &new_connection_type))
3172                         DRM_ERROR("KMS: Failed to detect connector\n");
3173
3174                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3175                         emulated_link_detect(link);
3176                         amdgpu_dm_update_connector_after_detect(aconnector);
3177
3178                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3179                         amdgpu_dm_update_connector_after_detect(aconnector);
3180                         register_backlight_device(dm, link);
3181                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3182                                 amdgpu_dm_set_psr_caps(link);
3183                 }
3184
3185
3186         }
3187
3188         /* Software is initialized. Now we can register interrupt handlers. */
3189         switch (adev->asic_type) {
3190         case CHIP_BONAIRE:
3191         case CHIP_HAWAII:
3192         case CHIP_KAVERI:
3193         case CHIP_KABINI:
3194         case CHIP_MULLINS:
3195         case CHIP_TONGA:
3196         case CHIP_FIJI:
3197         case CHIP_CARRIZO:
3198         case CHIP_STONEY:
3199         case CHIP_POLARIS11:
3200         case CHIP_POLARIS10:
3201         case CHIP_POLARIS12:
3202         case CHIP_VEGAM:
3203         case CHIP_VEGA10:
3204         case CHIP_VEGA12:
3205         case CHIP_VEGA20:
3206                 if (dce110_register_irq_handlers(dm->adev)) {
3207                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3208                         goto fail;
3209                 }
3210                 break;
3211 #if defined(CONFIG_DRM_AMD_DC_DCN)
3212         case CHIP_RAVEN:
3213         case CHIP_NAVI12:
3214         case CHIP_NAVI10:
3215         case CHIP_NAVI14:
3216         case CHIP_RENOIR:
3217 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3218         case CHIP_SIENNA_CICHLID:
3219 #endif
3220                 if (dcn10_register_irq_handlers(dm->adev)) {
3221                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3222                         goto fail;
3223                 }
3224                 break;
3225 #endif
3226         default:
3227                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3228                 goto fail;
3229         }
3230
3231         /* No userspace support. */
3232         dm->dc->debug.disable_tri_buf = true;
3233
3234         return 0;
3235 fail:
3236         kfree(aencoder);
3237         kfree(aconnector);
3238
3239         return -EINVAL;
3240 }
3241
3242 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3243 {
3244         drm_mode_config_cleanup(dm->ddev);
3245         drm_atomic_private_obj_fini(&dm->atomic_obj);
3247 }
3248
3249 /******************************************************************************
3250  * amdgpu_display_funcs functions
3251  *****************************************************************************/
3252
3253 /*
3254  * dm_bandwidth_update - program display watermarks
3255  *
3256  * @adev: amdgpu_device pointer
3257  *
3258  * Calculate and program the display watermarks and line buffer allocation.
3259  */
3260 static void dm_bandwidth_update(struct amdgpu_device *adev)
3261 {
3262         /* TODO: implement later */
3263 }
3264
3265 static const struct amdgpu_display_funcs dm_display_funcs = {
3266         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3267         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3268         .backlight_set_level = NULL, /* never called for DC */
3269         .backlight_get_level = NULL, /* never called for DC */
3270         .hpd_sense = NULL,/* called unconditionally */
3271         .hpd_set_polarity = NULL, /* called unconditionally */
3272         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3273         .page_flip_get_scanoutpos =
3274                 dm_crtc_get_scanoutpos,/* called unconditionally */
3275         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3276         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3277 };
3278
3279 #if defined(CONFIG_DEBUG_KERNEL_DC)
3280
3281 static ssize_t s3_debug_store(struct device *device,
3282                               struct device_attribute *attr,
3283                               const char *buf,
3284                               size_t count)
3285 {
3286         int ret;
3287         int s3_state;
3288         struct drm_device *drm_dev = dev_get_drvdata(device);
3289         struct amdgpu_device *adev = drm_dev->dev_private;
3290
3291         ret = kstrtoint(buf, 0, &s3_state);
3292
3293         if (ret == 0) {
3294                 if (s3_state) {
3295                         dm_resume(adev);
3296                         drm_kms_helper_hotplug_event(adev->ddev);
3297                 } else
3298                         dm_suspend(adev);
3299         }
3300
3301         return ret == 0 ? count : 0;
3302 }
3303
3304 DEVICE_ATTR_WO(s3_debug);
3305
3306 #endif
3307
3308 static int dm_early_init(void *handle)
3309 {
3310         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3311
3312         switch (adev->asic_type) {
3313         case CHIP_BONAIRE:
3314         case CHIP_HAWAII:
3315                 adev->mode_info.num_crtc = 6;
3316                 adev->mode_info.num_hpd = 6;
3317                 adev->mode_info.num_dig = 6;
3318                 break;
3319         case CHIP_KAVERI:
3320                 adev->mode_info.num_crtc = 4;
3321                 adev->mode_info.num_hpd = 6;
3322                 adev->mode_info.num_dig = 7;
3323                 break;
3324         case CHIP_KABINI:
3325         case CHIP_MULLINS:
3326                 adev->mode_info.num_crtc = 2;
3327                 adev->mode_info.num_hpd = 6;
3328                 adev->mode_info.num_dig = 6;
3329                 break;
3330         case CHIP_FIJI:
3331         case CHIP_TONGA:
3332                 adev->mode_info.num_crtc = 6;
3333                 adev->mode_info.num_hpd = 6;
3334                 adev->mode_info.num_dig = 7;
3335                 break;
3336         case CHIP_CARRIZO:
3337                 adev->mode_info.num_crtc = 3;
3338                 adev->mode_info.num_hpd = 6;
3339                 adev->mode_info.num_dig = 9;
3340                 break;
3341         case CHIP_STONEY:
3342                 adev->mode_info.num_crtc = 2;
3343                 adev->mode_info.num_hpd = 6;
3344                 adev->mode_info.num_dig = 9;
3345                 break;
3346         case CHIP_POLARIS11:
3347         case CHIP_POLARIS12:
3348                 adev->mode_info.num_crtc = 5;
3349                 adev->mode_info.num_hpd = 5;
3350                 adev->mode_info.num_dig = 5;
3351                 break;
3352         case CHIP_POLARIS10:
3353         case CHIP_VEGAM:
3354                 adev->mode_info.num_crtc = 6;
3355                 adev->mode_info.num_hpd = 6;
3356                 adev->mode_info.num_dig = 6;
3357                 break;
3358         case CHIP_VEGA10:
3359         case CHIP_VEGA12:
3360         case CHIP_VEGA20:
3361                 adev->mode_info.num_crtc = 6;
3362                 adev->mode_info.num_hpd = 6;
3363                 adev->mode_info.num_dig = 6;
3364                 break;
3365 #if defined(CONFIG_DRM_AMD_DC_DCN)
3366         case CHIP_RAVEN:
3367                 adev->mode_info.num_crtc = 4;
3368                 adev->mode_info.num_hpd = 4;
3369                 adev->mode_info.num_dig = 4;
3370                 break;
3371 #endif
3372         case CHIP_NAVI10:
3373         case CHIP_NAVI12:
3374 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3375         case CHIP_SIENNA_CICHLID:
3376 #endif
3377                 adev->mode_info.num_crtc = 6;
3378                 adev->mode_info.num_hpd = 6;
3379                 adev->mode_info.num_dig = 6;
3380                 break;
3381         case CHIP_NAVI14:
3382                 adev->mode_info.num_crtc = 5;
3383                 adev->mode_info.num_hpd = 5;
3384                 adev->mode_info.num_dig = 5;
3385                 break;
3386         case CHIP_RENOIR:
3387                 adev->mode_info.num_crtc = 4;
3388                 adev->mode_info.num_hpd = 4;
3389                 adev->mode_info.num_dig = 4;
3390                 break;
3391         default:
3392                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3393                 return -EINVAL;
3394         }
3395
3396         amdgpu_dm_set_irq_funcs(adev);
3397
3398         if (adev->mode_info.funcs == NULL)
3399                 adev->mode_info.funcs = &dm_display_funcs;
3400
3401         /*
3402          * Note: Do NOT change adev->audio_endpt_rreg and
3403          * adev->audio_endpt_wreg because they are initialised in
3404          * amdgpu_device_init()
3405          */
3406 #if defined(CONFIG_DEBUG_KERNEL_DC)
3407         device_create_file(
3408                 adev->ddev->dev,
3409                 &dev_attr_s3_debug);
3410 #endif
3411
3412         return 0;
3413 }
3414
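/*
 * modeset_required() is true when a CRTC that needs a modeset is enabled
 * and active in the new state; modereset_required() is true when such a
 * CRTC is being disabled instead.
 */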
3415 static bool modeset_required(struct drm_crtc_state *crtc_state,
3416                              struct dc_stream_state *new_stream,
3417                              struct dc_stream_state *old_stream)
3418 {
3419         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3420                 return false;
3421
3422         if (!crtc_state->enable)
3423                 return false;
3424
3425         return crtc_state->active;
3426 }
3427
3428 static bool modereset_required(struct drm_crtc_state *crtc_state)
3429 {
3430         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3431                 return false;
3432
3433         return !crtc_state->enable || !crtc_state->active;
3434 }
3435
3436 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3437 {
3438         drm_encoder_cleanup(encoder);
3439         kfree(encoder);
3440 }
3441
3442 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3443         .destroy = amdgpu_dm_encoder_destroy,
3444 };
3445
3446
3447 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3448                                 struct dc_scaling_info *scaling_info)
3449 {
3450         int scale_w, scale_h;
3451
3452         memset(scaling_info, 0, sizeof(*scaling_info));
3453
3454         /* Source is fixed 16.16 but we ignore mantissa for now... */
3455         scaling_info->src_rect.x = state->src_x >> 16;
3456         scaling_info->src_rect.y = state->src_y >> 16;
3457
3458         scaling_info->src_rect.width = state->src_w >> 16;
3459         if (scaling_info->src_rect.width == 0)
3460                 return -EINVAL;
3461
3462         scaling_info->src_rect.height = state->src_h >> 16;
3463         if (scaling_info->src_rect.height == 0)
3464                 return -EINVAL;
3465
3466         scaling_info->dst_rect.x = state->crtc_x;
3467         scaling_info->dst_rect.y = state->crtc_y;
3468
3469         if (state->crtc_w == 0)
3470                 return -EINVAL;
3471
3472         scaling_info->dst_rect.width = state->crtc_w;
3473
3474         if (state->crtc_h == 0)
3475                 return -EINVAL;
3476
3477         scaling_info->dst_rect.height = state->crtc_h;
3478
3479         /* DRM doesn't specify clipping on destination output. */
3480         scaling_info->clip_rect = scaling_info->dst_rect;
3481
3482         /* TODO: Validate scaling per-format with DC plane caps */
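        /*
         * scale_w/scale_h are ratios in units of 0.001, so the 250..16000
         * window below allows scale factors from 0.25x up to 16x.
         */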
3483         scale_w = scaling_info->dst_rect.width * 1000 /
3484                   scaling_info->src_rect.width;
3485
3486         if (scale_w < 250 || scale_w > 16000)
3487                 return -EINVAL;
3488
3489         scale_h = scaling_info->dst_rect.height * 1000 /
3490                   scaling_info->src_rect.height;
3491
3492         if (scale_h < 250 || scale_h > 16000)
3493                 return -EINVAL;
3494
3495         /*
3496          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3497          * assume reasonable defaults based on the format.
3498          */
3499
3500         return 0;
3501 }
3502
3503 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3504                        uint64_t *tiling_flags, bool *tmz_surface)
3505 {
3506         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3507         int r = amdgpu_bo_reserve(rbo, false);
3508
3509         if (unlikely(r)) {
3510                 /* Don't show error message when returning -ERESTARTSYS */
3511                 if (r != -ERESTARTSYS)
3512                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3513                 return r;
3514         }
3515
3516         if (tiling_flags)
3517                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3518
3519         if (tmz_surface)
3520                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3521
3522         amdgpu_bo_unreserve(rbo);
3523
3524         return r;
3525 }
3526
3527 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3528 {
3529         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3530
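        /*
         * DCC_OFFSET_256B is stored in 256-byte units; an offset of zero
         * means the buffer carries no DCC metadata.
         */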
3531         return offset ? (address + offset * 256) : 0;
3532 }
3533
3534 static int
3535 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3536                           const struct amdgpu_framebuffer *afb,
3537                           const enum surface_pixel_format format,
3538                           const enum dc_rotation_angle rotation,
3539                           const struct plane_size *plane_size,
3540                           const union dc_tiling_info *tiling_info,
3541                           const uint64_t info,
3542                           struct dc_plane_dcc_param *dcc,
3543                           struct dc_plane_address *address,
3544                           bool force_disable_dcc)
3545 {
3546         struct dc *dc = adev->dm.dc;
3547         struct dc_dcc_surface_param input;
3548         struct dc_surface_dcc_cap output;
3549         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3550         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3551         uint64_t dcc_address;
3552
3553         memset(&input, 0, sizeof(input));
3554         memset(&output, 0, sizeof(output));
3555
3556         if (force_disable_dcc)
3557                 return 0;
3558
3559         if (!offset)
3560                 return 0;
3561
3562         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3563                 return 0;
3564
3565         if (!dc->cap_funcs.get_dcc_compression_cap)
3566                 return -EINVAL;
3567
3568         input.format = format;
3569         input.surface_size.width = plane_size->surface_size.width;
3570         input.surface_size.height = plane_size->surface_size.height;
3571         input.swizzle_mode = tiling_info->gfx9.swizzle;
3572
3573         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3574                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3575         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3576                 input.scan = SCAN_DIRECTION_VERTICAL;
3577
3578         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3579                 return -EINVAL;
3580
3581         if (!output.capable)
3582                 return -EINVAL;
3583
3584         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3585                 return -EINVAL;
3586
3587         dcc->enable = 1;
3588         dcc->meta_pitch =
3589                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3590         dcc->independent_64b_blks = i64b;
3591
3592         dcc_address = get_dcc_address(afb->address, info);
3593         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3594         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3595
3596         return 0;
3597 }
3598
3599 static int
3600 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3601                              const struct amdgpu_framebuffer *afb,
3602                              const enum surface_pixel_format format,
3603                              const enum dc_rotation_angle rotation,
3604                              const uint64_t tiling_flags,
3605                              union dc_tiling_info *tiling_info,
3606                              struct plane_size *plane_size,
3607                              struct dc_plane_dcc_param *dcc,
3608                              struct dc_plane_address *address,
3609                              bool tmz_surface,
3610                              bool force_disable_dcc)
3611 {
3612         const struct drm_framebuffer *fb = &afb->base;
3613         int ret;
3614
3615         memset(tiling_info, 0, sizeof(*tiling_info));
3616         memset(plane_size, 0, sizeof(*plane_size));
3617         memset(dcc, 0, sizeof(*dcc));
3618         memset(address, 0, sizeof(*address));
3619
3620         address->tmz_surface = tmz_surface;
3621
3622         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3623                 plane_size->surface_size.x = 0;
3624                 plane_size->surface_size.y = 0;
3625                 plane_size->surface_size.width = fb->width;
3626                 plane_size->surface_size.height = fb->height;
3627                 plane_size->surface_pitch =
3628                         fb->pitches[0] / fb->format->cpp[0];
3629
3630                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3631                 address->grph.addr.low_part = lower_32_bits(afb->address);
3632                 address->grph.addr.high_part = upper_32_bits(afb->address);
3633         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3634                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3635
3636                 plane_size->surface_size.x = 0;
3637                 plane_size->surface_size.y = 0;
3638                 plane_size->surface_size.width = fb->width;
3639                 plane_size->surface_size.height = fb->height;
3640                 plane_size->surface_pitch =
3641                         fb->pitches[0] / fb->format->cpp[0];
3642
3643                 plane_size->chroma_size.x = 0;
3644                 plane_size->chroma_size.y = 0;
3645                 /* TODO: set these based on surface format; /2 assumes 4:2:0 subsampling (NV12/NV21/P010) */
3646                 plane_size->chroma_size.width = fb->width / 2;
3647                 plane_size->chroma_size.height = fb->height / 2;
3648
3649                 plane_size->chroma_pitch =
3650                         fb->pitches[1] / fb->format->cpp[1];
3651
3652                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3653                 address->video_progressive.luma_addr.low_part =
3654                         lower_32_bits(afb->address);
3655                 address->video_progressive.luma_addr.high_part =
3656                         upper_32_bits(afb->address);
3657                 address->video_progressive.chroma_addr.low_part =
3658                         lower_32_bits(chroma_addr);
3659                 address->video_progressive.chroma_addr.high_part =
3660                         upper_32_bits(chroma_addr);
3661         }
3662
3663         /* Fill GFX8 params */
3664         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3665                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3666
3667                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3668                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3669                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3670                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3671                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3672
3673                 /* XXX fix me for VI */
3674                 tiling_info->gfx8.num_banks = num_banks;
3675                 tiling_info->gfx8.array_mode =
3676                                 DC_ARRAY_2D_TILED_THIN1;
3677                 tiling_info->gfx8.tile_split = tile_split;
3678                 tiling_info->gfx8.bank_width = bankw;
3679                 tiling_info->gfx8.bank_height = bankh;
3680                 tiling_info->gfx8.tile_aspect = mtaspect;
3681                 tiling_info->gfx8.tile_mode =
3682                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3683         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3684                         == DC_ARRAY_1D_TILED_THIN1) {
3685                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3686         }
3687
3688         tiling_info->gfx8.pipe_config =
3689                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3690
3691         if (adev->asic_type == CHIP_VEGA10 ||
3692             adev->asic_type == CHIP_VEGA12 ||
3693             adev->asic_type == CHIP_VEGA20 ||
3694             adev->asic_type == CHIP_NAVI10 ||
3695             adev->asic_type == CHIP_NAVI14 ||
3696             adev->asic_type == CHIP_NAVI12 ||
3697 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3698                 adev->asic_type == CHIP_SIENNA_CICHLID ||
3699 #endif
3700             adev->asic_type == CHIP_RENOIR ||
3701             adev->asic_type == CHIP_RAVEN) {
3702                 /* Fill GFX9 params */
3703                 tiling_info->gfx9.num_pipes =
3704                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3705                 tiling_info->gfx9.num_banks =
3706                         adev->gfx.config.gb_addr_config_fields.num_banks;
3707                 tiling_info->gfx9.pipe_interleave =
3708                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3709                 tiling_info->gfx9.num_shader_engines =
3710                         adev->gfx.config.gb_addr_config_fields.num_se;
3711                 tiling_info->gfx9.max_compressed_frags =
3712                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3713                 tiling_info->gfx9.num_rb_per_se =
3714                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3715                 tiling_info->gfx9.swizzle =
3716                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3717                 tiling_info->gfx9.shaderEnable = 1;
3718
3719 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3720                 if (adev->asic_type == CHIP_SIENNA_CICHLID)
3721                         tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3722
3723 #endif
3724                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3725                                                 plane_size, tiling_info,
3726                                                 tiling_flags, dcc, address,
3727                                                 force_disable_dcc);
3728                 if (ret)
3729                         return ret;
3730         }
3731
3732         return 0;
3733 }
3734
3735 static void
3736 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3737                                bool *per_pixel_alpha, bool *global_alpha,
3738                                int *global_alpha_value)
3739 {
3740         *per_pixel_alpha = false;
3741         *global_alpha = false;
3742         *global_alpha_value = 0xff;
3743
3744         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3745                 return;
3746
3747         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3748                 static const uint32_t alpha_formats[] = {
3749                         DRM_FORMAT_ARGB8888,
3750                         DRM_FORMAT_RGBA8888,
3751                         DRM_FORMAT_ABGR8888,
3752                 };
3753                 uint32_t format = plane_state->fb->format->format;
3754                 unsigned int i;
3755
3756                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3757                         if (format == alpha_formats[i]) {
3758                                 *per_pixel_alpha = true;
3759                                 break;
3760                         }
3761                 }
3762         }
3763
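        /*
         * DRM plane alpha is 16-bit (0xffff = fully opaque, which needs no
         * global alpha); DC takes an 8-bit value, hence the shift below.
         */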
3764         if (plane_state->alpha < 0xffff) {
3765                 *global_alpha = true;
3766                 *global_alpha_value = plane_state->alpha >> 8;
3767         }
3768 }
3769
3770 static int
3771 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3772                             const enum surface_pixel_format format,
3773                             enum dc_color_space *color_space)
3774 {
3775         bool full_range;
3776
3777         *color_space = COLOR_SPACE_SRGB;
3778
3779         /* DRM color properties only affect non-RGB formats. */
3780         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3781                 return 0;
3782
3783         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3784
3785         switch (plane_state->color_encoding) {
3786         case DRM_COLOR_YCBCR_BT601:
3787                 if (full_range)
3788                         *color_space = COLOR_SPACE_YCBCR601;
3789                 else
3790                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3791                 break;
3792
3793         case DRM_COLOR_YCBCR_BT709:
3794                 if (full_range)
3795                         *color_space = COLOR_SPACE_YCBCR709;
3796                 else
3797                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3798                 break;
3799
3800         case DRM_COLOR_YCBCR_BT2020:
3801                 if (full_range)
3802                         *color_space = COLOR_SPACE_2020_YCBCR;
3803                 else
3804                         return -EINVAL;
3805                 break;
3806
3807         default:
3808                 return -EINVAL;
3809         }
3810
3811         return 0;
3812 }
3813
3814 static int
3815 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3816                             const struct drm_plane_state *plane_state,
3817                             const uint64_t tiling_flags,
3818                             struct dc_plane_info *plane_info,
3819                             struct dc_plane_address *address,
3820                             bool tmz_surface,
3821                             bool force_disable_dcc)
3822 {
3823         const struct drm_framebuffer *fb = plane_state->fb;
3824         const struct amdgpu_framebuffer *afb =
3825                 to_amdgpu_framebuffer(plane_state->fb);
3826         struct drm_format_name_buf format_name;
3827         int ret;
3828
3829         memset(plane_info, 0, sizeof(*plane_info));
3830
3831         switch (fb->format->format) {
3832         case DRM_FORMAT_C8:
3833                 plane_info->format =
3834                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3835                 break;
3836         case DRM_FORMAT_RGB565:
3837                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3838                 break;
3839         case DRM_FORMAT_XRGB8888:
3840         case DRM_FORMAT_ARGB8888:
3841                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3842                 break;
3843         case DRM_FORMAT_XRGB2101010:
3844         case DRM_FORMAT_ARGB2101010:
3845                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3846                 break;
3847         case DRM_FORMAT_XBGR2101010:
3848         case DRM_FORMAT_ABGR2101010:
3849                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3850                 break;
3851         case DRM_FORMAT_XBGR8888:
3852         case DRM_FORMAT_ABGR8888:
3853                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3854                 break;
3855         case DRM_FORMAT_NV21:
3856                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3857                 break;
3858         case DRM_FORMAT_NV12:
3859                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3860                 break;
3861         case DRM_FORMAT_P010:
3862                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3863                 break;
3864         case DRM_FORMAT_XRGB16161616F:
3865         case DRM_FORMAT_ARGB16161616F:
3866                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3867                 break;
3868         case DRM_FORMAT_XBGR16161616F:
3869         case DRM_FORMAT_ABGR16161616F:
3870                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3871                 break;
3872         default:
3873                 DRM_ERROR(
3874                         "Unsupported screen format %s\n",
3875                         drm_get_format_name(fb->format->format, &format_name));
3876                 return -EINVAL;
3877         }
3878
3879         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3880         case DRM_MODE_ROTATE_0:
3881                 plane_info->rotation = ROTATION_ANGLE_0;
3882                 break;
3883         case DRM_MODE_ROTATE_90:
3884                 plane_info->rotation = ROTATION_ANGLE_90;
3885                 break;
3886         case DRM_MODE_ROTATE_180:
3887                 plane_info->rotation = ROTATION_ANGLE_180;
3888                 break;
3889         case DRM_MODE_ROTATE_270:
3890                 plane_info->rotation = ROTATION_ANGLE_270;
3891                 break;
3892         default:
3893                 plane_info->rotation = ROTATION_ANGLE_0;
3894                 break;
3895         }
3896
3897         plane_info->visible = true;
3898         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3899
3900         plane_info->layer_index = 0;
3901
3902         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3903                                           &plane_info->color_space);
3904         if (ret)
3905                 return ret;
3906
3907         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3908                                            plane_info->rotation, tiling_flags,
3909                                            &plane_info->tiling_info,
3910                                            &plane_info->plane_size,
3911                                            &plane_info->dcc, address, tmz_surface,
3912                                            force_disable_dcc);
3913         if (ret)
3914                 return ret;
3915
3916         fill_blending_from_plane_state(
3917                 plane_state, &plane_info->per_pixel_alpha,
3918                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3919
3920         return 0;
3921 }
3922
3923 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3924                                     struct dc_plane_state *dc_plane_state,
3925                                     struct drm_plane_state *plane_state,
3926                                     struct drm_crtc_state *crtc_state)
3927 {
3928         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3929         const struct amdgpu_framebuffer *amdgpu_fb =
3930                 to_amdgpu_framebuffer(plane_state->fb);
3931         struct dc_scaling_info scaling_info;
3932         struct dc_plane_info plane_info;
3933         uint64_t tiling_flags;
3934         int ret;
3935         bool tmz_surface = false;
3936         bool force_disable_dcc = false;
3937
3938         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3939         if (ret)
3940                 return ret;
3941
3942         dc_plane_state->src_rect = scaling_info.src_rect;
3943         dc_plane_state->dst_rect = scaling_info.dst_rect;
3944         dc_plane_state->clip_rect = scaling_info.clip_rect;
3945         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3946
3947         ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3948         if (ret)
3949                 return ret;
3950
3951         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3952         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3953                                           &plane_info,
3954                                           &dc_plane_state->address,
3955                                           tmz_surface,
3956                                           force_disable_dcc);
3957         if (ret)
3958                 return ret;
3959
3960         dc_plane_state->format = plane_info.format;
3961         dc_plane_state->color_space = plane_info.color_space;
3963         dc_plane_state->plane_size = plane_info.plane_size;
3964         dc_plane_state->rotation = plane_info.rotation;
3965         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3966         dc_plane_state->stereo_format = plane_info.stereo_format;
3967         dc_plane_state->tiling_info = plane_info.tiling_info;
3968         dc_plane_state->visible = plane_info.visible;
3969         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3970         dc_plane_state->global_alpha = plane_info.global_alpha;
3971         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3972         dc_plane_state->dcc = plane_info.dcc;
3973         dc_plane_state->layer_index = plane_info.layer_index; // currently always 0
3974
3975         /*
3976          * Always set input transfer function, since plane state is refreshed
3977          * every time.
3978          */
3979         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3980         if (ret)
3981                 return ret;
3982
3983         return 0;
3984 }
3985
3986 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3987                                            const struct dm_connector_state *dm_state,
3988                                            struct dc_stream_state *stream)
3989 {
3990         enum amdgpu_rmx_type rmx_type;
3991
3992         struct rect src = { 0 }; /* viewport in composition space */
3993         struct rect dst = { 0 }; /* stream addressable area */
3994
3995         /* no mode. nothing to be done */
3996         if (!mode)
3997                 return;
3998
3999         /* Full screen scaling by default */
4000         src.width = mode->hdisplay;
4001         src.height = mode->vdisplay;
4002         dst.width = stream->timing.h_addressable;
4003         dst.height = stream->timing.v_addressable;
4004
4005         if (dm_state) {
4006                 rmx_type = dm_state->scaling;
4007                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4008                         if (src.width * dst.height <
4009                                         src.height * dst.width) {
4010                                 /* height needs less upscaling/more downscaling */
4011                                 dst.width = src.width *
4012                                                 dst.height / src.height;
4013                         } else {
4014                                 /* width needs less upscaling/more downscaling */
4015                                 dst.height = src.height *
4016                                                 dst.width / src.width;
4017                         }
4018                 } else if (rmx_type == RMX_CENTER) {
4019                         dst = src;
4020                 }
4021
4022                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4023                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4024
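                /*
                 * E.g. a 1920x1080 mode in a 1280x1024 timing with
                 * RMX_ASPECT yields dst = 1280x720 at (0, 152), preserving
                 * the 16:9 source aspect ratio.
                 */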
4025                 if (dm_state->underscan_enable) {
4026                         dst.x += dm_state->underscan_hborder / 2;
4027                         dst.y += dm_state->underscan_vborder / 2;
4028                         dst.width -= dm_state->underscan_hborder;
4029                         dst.height -= dm_state->underscan_vborder;
4030                 }
4031         }
4032
4033         stream->src = src;
4034         stream->dst = dst;
4035
4036         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4037                         dst.x, dst.y, dst.width, dst.height);
4038
4039 }
4040
4041 static enum dc_color_depth
4042 convert_color_depth_from_display_info(const struct drm_connector *connector,
4043                                       bool is_y420, int requested_bpc)
4044 {
4045         uint8_t bpc;
4046
4047         if (is_y420) {
4048                 bpc = 8;
4049
4050                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4051                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4052                         bpc = 16;
4053                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4054                         bpc = 12;
4055                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4056                         bpc = 10;
4057         } else {
4058                 bpc = (uint8_t)connector->display_info.bpc;
4059                 /* Assume 8 bpc by default if no bpc is specified. */
4060                 bpc = bpc ? bpc : 8;
4061         }
4062
4063         if (requested_bpc > 0) {
4064                 /*
4065                  * Cap display bpc based on the user requested value.
4066                  *
4067                  * The value of state->max_bpc may not be correctly updated
4068                  * depending on when the connector gets added to the state
4069                  * or if this was called outside of atomic check, so it
4070                  * can't be used directly.
4071                  */
4072                 bpc = min_t(u8, bpc, requested_bpc);
4073
4074                 /* Round down to the nearest even number. */
4075                 bpc = bpc - (bpc & 1);
4076         }
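        /*
         * E.g. a 12 bpc panel with requested_bpc = 11 is clamped to 11 and
         * then rounded down to 10 bpc.
         */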
4077
4078         switch (bpc) {
4079         case 0:
4080                 /*
4081                  * Temporary workaround: DRM doesn't parse color depth
4082                  * for EDID revisions before 1.4.
4083                  * TODO: Fix EDID parsing
4084                  */
4085                 return COLOR_DEPTH_888;
4086         case 6:
4087                 return COLOR_DEPTH_666;
4088         case 8:
4089                 return COLOR_DEPTH_888;
4090         case 10:
4091                 return COLOR_DEPTH_101010;
4092         case 12:
4093                 return COLOR_DEPTH_121212;
4094         case 14:
4095                 return COLOR_DEPTH_141414;
4096         case 16:
4097                 return COLOR_DEPTH_161616;
4098         default:
4099                 return COLOR_DEPTH_UNDEFINED;
4100         }
4101 }
4102
4103 static enum dc_aspect_ratio
4104 get_aspect_ratio(const struct drm_display_mode *mode_in)
4105 {
4106         /* 1-1 mapping, since both enums follow the HDMI spec. */
4107         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4108 }
4109
4110 static enum dc_color_space
4111 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4112 {
4113         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4114
4115         switch (dc_crtc_timing->pixel_encoding) {
4116         case PIXEL_ENCODING_YCBCR422:
4117         case PIXEL_ENCODING_YCBCR444:
4118         case PIXEL_ENCODING_YCBCR420:
4119         {
4120                 /*
4121                  * 27.03 MHz (270300 in 100 Hz units) is the separation
4122                  * point between HDTV and SDTV per the HDMI spec; we use
4123                  * YCbCr709 above it and YCbCr601 below it.
4124                  */
4125                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4126                         if (dc_crtc_timing->flags.Y_ONLY)
4127                                 color_space =
4128                                         COLOR_SPACE_YCBCR709_LIMITED;
4129                         else
4130                                 color_space = COLOR_SPACE_YCBCR709;
4131                 } else {
4132                         if (dc_crtc_timing->flags.Y_ONLY)
4133                                 color_space =
4134                                         COLOR_SPACE_YCBCR601_LIMITED;
4135                         else
4136                                 color_space = COLOR_SPACE_YCBCR601;
4137                 }
4138
4139         }
4140         break;
4141         case PIXEL_ENCODING_RGB:
4142                 color_space = COLOR_SPACE_SRGB;
4143                 break;
4144
4145         default:
4146                 WARN_ON(1);
4147                 break;
4148         }
4149
4150         return color_space;
4151 }
4152
4153 static bool adjust_colour_depth_from_display_info(
4154         struct dc_crtc_timing *timing_out,
4155         const struct drm_display_info *info)
4156 {
4157         enum dc_color_depth depth = timing_out->display_color_depth;
4158         int normalized_clk;
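        /*
         * normalized_clk ends up in kHz (pix_clk_100hz / 10). E.g. a 4k60
         * HDMI mode (594000 kHz) at YCbCr 4:2:0 halves to 297000 kHz;
         * starting at 12 bpc needs 445500 kHz and 10 bpc 371250 kHz, so
         * against a 340000 kHz max_tmds_clock the loop settles on 8 bpc.
         */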
4159         do {
4160                 normalized_clk = timing_out->pix_clk_100hz / 10;
4161                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4162                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4163                         normalized_clk /= 2;
4164                 /* Adjust the pixel clock per the HDMI spec for the given colour depth. */
4165                 switch (depth) {
4166                 case COLOR_DEPTH_888:
4167                         break;
4168                 case COLOR_DEPTH_101010:
4169                         normalized_clk = (normalized_clk * 30) / 24;
4170                         break;
4171                 case COLOR_DEPTH_121212:
4172                         normalized_clk = (normalized_clk * 36) / 24;
4173                         break;
4174                 case COLOR_DEPTH_161616:
4175                         normalized_clk = (normalized_clk * 48) / 24;
4176                         break;
4177                 default:
4178                         /* The above depths are the only ones valid for HDMI. */
4179                         return false;
4180                 }
4181                 if (normalized_clk <= info->max_tmds_clock) {
4182                         timing_out->display_color_depth = depth;
4183                         return true;
4184                 }
4185         } while (--depth > COLOR_DEPTH_666);
4186         return false;
4187 }
4188
4189 static void fill_stream_properties_from_drm_display_mode(
4190         struct dc_stream_state *stream,
4191         const struct drm_display_mode *mode_in,
4192         const struct drm_connector *connector,
4193         const struct drm_connector_state *connector_state,
4194         const struct dc_stream_state *old_stream,
4195         int requested_bpc)
4196 {
4197         struct dc_crtc_timing *timing_out = &stream->timing;
4198         const struct drm_display_info *info = &connector->display_info;
4199         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4200         struct hdmi_vendor_infoframe hv_frame;
4201         struct hdmi_avi_infoframe avi_frame;
4202
4203         memset(&hv_frame, 0, sizeof(hv_frame));
4204         memset(&avi_frame, 0, sizeof(avi_frame));
4205
4206         timing_out->h_border_left = 0;
4207         timing_out->h_border_right = 0;
4208         timing_out->v_border_top = 0;
4209         timing_out->v_border_bottom = 0;
4210         /* TODO: un-hardcode */
4211         if (drm_mode_is_420_only(info, mode_in)
4212                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4213                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4214         else if (drm_mode_is_420_also(info, mode_in)
4215                         && aconnector->force_yuv420_output)
4216                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4217         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4218                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4219                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4220         else
4221                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4222
4223         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4224         timing_out->display_color_depth = convert_color_depth_from_display_info(
4225                 connector,
4226                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4227                 requested_bpc);
4228         timing_out->scan_type = SCANNING_TYPE_NODATA;
4229         timing_out->hdmi_vic = 0;
4230
4231         if (old_stream) {
4232                 timing_out->vic = old_stream->timing.vic;
4233                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4234                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4235         } else {
4236                 timing_out->vic = drm_match_cea_mode(mode_in);
4237                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4238                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4239                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4240                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4241         }
4242
4243         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4244                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4245                 timing_out->vic = avi_frame.video_code;
4246                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4247                 timing_out->hdmi_vic = hv_frame.vic;
4248         }
4249
4250         timing_out->h_addressable = mode_in->crtc_hdisplay;
4251         timing_out->h_total = mode_in->crtc_htotal;
4252         timing_out->h_sync_width =
4253                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4254         timing_out->h_front_porch =
4255                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4256         timing_out->v_total = mode_in->crtc_vtotal;
4257         timing_out->v_addressable = mode_in->crtc_vdisplay;
4258         timing_out->v_front_porch =
4259                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4260         timing_out->v_sync_width =
4261                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4262         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4263         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4264
4265         stream->output_color_space = get_output_color_space(timing_out);
4266
4267         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4268         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4269         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4270                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4271                     drm_mode_is_420_also(info, mode_in) &&
4272                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4273                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4274                         adjust_colour_depth_from_display_info(timing_out, info);
4275                 }
4276         }
4277 }
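
/*
 * Editor's illustrative sketch (not part of the driver): the porch and sync
 * widths filled in above fall straight out of the DRM timing fields. For the
 * CEA 1080p60 horizontal timing (hdisplay 1920, hsync_start 2008,
 * hsync_end 2052, htotal 2200) this gives an 88-pixel front porch and a
 * 44-pixel sync width.
 */
static void sketch_h_timing(int hdisplay, int hsync_start, int hsync_end,
			    int *front_porch, int *sync_width)
{
	*front_porch = hsync_start - hdisplay;	/* 2008 - 1920 = 88 */
	*sync_width = hsync_end - hsync_start;	/* 2052 - 2008 = 44 */
}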
4278
4279 static void fill_audio_info(struct audio_info *audio_info,
4280                             const struct drm_connector *drm_connector,
4281                             const struct dc_sink *dc_sink)
4282 {
4283         int i = 0;
4284         int cea_revision = 0;
4285         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4286
4287         audio_info->manufacture_id = edid_caps->manufacturer_id;
4288         audio_info->product_id = edid_caps->product_id;
4289
4290         cea_revision = drm_connector->display_info.cea_rev;
4291
4292         strscpy(audio_info->display_name,
4293                 edid_caps->display_name,
4294                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4295
4296         if (cea_revision >= 3) {
4297                 audio_info->mode_count = edid_caps->audio_mode_count;
4298
4299                 for (i = 0; i < audio_info->mode_count; ++i) {
4300                         audio_info->modes[i].format_code =
4301                                         (enum audio_format_code)
4302                                         (edid_caps->audio_modes[i].format_code);
4303                         audio_info->modes[i].channel_count =
4304                                         edid_caps->audio_modes[i].channel_count;
4305                         audio_info->modes[i].sample_rates.all =
4306                                         edid_caps->audio_modes[i].sample_rate;
4307                         audio_info->modes[i].sample_size =
4308                                         edid_caps->audio_modes[i].sample_size;
4309                 }
4310         }
4311
4312         audio_info->flags.all = edid_caps->speaker_flags;
4313
4314         /* TODO: We only check progressive mode; check interlaced mode as well */
4315         if (drm_connector->latency_present[0]) {
4316                 audio_info->video_latency = drm_connector->video_latency[0];
4317                 audio_info->audio_latency = drm_connector->audio_latency[0];
4318         }
4319
4320         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4321
4322 }
4323
4324 static void
4325 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4326                                       struct drm_display_mode *dst_mode)
4327 {
4328         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4329         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4330         dst_mode->crtc_clock = src_mode->crtc_clock;
4331         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4332         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4333         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4334         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4335         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4336         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4337         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4338         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4339         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4340         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4341         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4342 }
4343
4344 static void
4345 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4346                                         const struct drm_display_mode *native_mode,
4347                                         bool scale_enabled)
4348 {
4349         if (scale_enabled) {
4350                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4351         } else if (native_mode->clock == drm_mode->clock &&
4352                         native_mode->htotal == drm_mode->htotal &&
4353                         native_mode->vtotal == drm_mode->vtotal) {
4354                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4355         } else {
4356                 /* neither scaling nor an amdgpu-inserted mode: nothing to patch */
4357         }
4358 }
4359
4360 static struct dc_sink *
4361 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4362 {
4363         struct dc_sink_init_data sink_init_data = { 0 };
4364         struct dc_sink *sink = NULL;
4365         sink_init_data.link = aconnector->dc_link;
4366         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4367
4368         sink = dc_sink_create(&sink_init_data);
4369         if (!sink) {
4370                 DRM_ERROR("Failed to create sink!\n");
4371                 return NULL;
4372         }
4373         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4374
4375         return sink;
4376 }
4377
4378 static void set_multisync_trigger_params(
4379                 struct dc_stream_state *stream)
4380 {
4381         if (stream->triggered_crtc_reset.enabled) {
4382                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4383                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4384         }
4385 }
4386
4387 static void set_master_stream(struct dc_stream_state *stream_set[],
4388                               int stream_count)
4389 {
4390         int j, highest_rfr = 0, master_stream = 0;
4391
4392         for (j = 0;  j < stream_count; j++) {
4393                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4394                         int refresh_rate = 0;
4395
4396                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4397                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4398                         if (refresh_rate > highest_rfr) {
4399                                 highest_rfr = refresh_rate;
4400                                 master_stream = j;
4401                         }
4402                 }
4403         }
4404         for (j = 0;  j < stream_count; j++) {
4405                 if (stream_set[j])
4406                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4407         }
4408 }
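
/*
 * Editor's illustrative sketch (not part of the driver): set_master_stream()
 * elects the stream with the highest refresh rate, computed from the pixel
 * clock and the total raster. With pix_clk_100hz = 1485000 (148.5 MHz) and
 * a 2200 x 1125 total raster this evaluates to 60 Hz.
 */
static int sketch_refresh_hz(int pix_clk_100hz, int h_total, int v_total)
{
	return (pix_clk_100hz * 100) / (h_total * v_total);	/* = 60 */
}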
4409
4410 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4411 {
4412         int i = 0;
4413
4414         if (context->stream_count < 2)
4415                 return;
4416         for (i = 0; i < context->stream_count ; i++) {
4417                 if (!context->streams[i])
4418                         continue;
4419                 /*
4420                  * TODO: add a function to read AMD VSDB bits and set
4421                  * crtc_sync_master.multi_sync_enabled flag
4422                  * For now it's set to false
4423                  */
4424                 set_multisync_trigger_params(context->streams[i]);
4425         }
4426         set_master_stream(context->streams, context->stream_count);
4427 }
4428
4429 static struct dc_stream_state *
4430 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4431                        const struct drm_display_mode *drm_mode,
4432                        const struct dm_connector_state *dm_state,
4433                        const struct dc_stream_state *old_stream,
4434                        int requested_bpc)
4435 {
4436         struct drm_display_mode *preferred_mode = NULL;
4437         struct drm_connector *drm_connector;
4438         const struct drm_connector_state *con_state =
4439                 dm_state ? &dm_state->base : NULL;
4440         struct dc_stream_state *stream = NULL;
4441         struct drm_display_mode mode = *drm_mode;
4442         bool native_mode_found = false;
4443         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4444         int mode_refresh;
4445         int preferred_refresh = 0;
4446 #if defined(CONFIG_DRM_AMD_DC_DCN)
4447         struct dsc_dec_dpcd_caps dsc_caps;
4448 #endif
4449         uint32_t link_bandwidth_kbps;
4450
4451         struct dc_sink *sink = NULL;
4452         if (aconnector == NULL) {
4453                 DRM_ERROR("aconnector is NULL!\n");
4454                 return stream;
4455         }
4456
4457         drm_connector = &aconnector->base;
4458
4459         if (!aconnector->dc_sink) {
4460                 sink = create_fake_sink(aconnector);
4461                 if (!sink)
4462                         return stream;
4463         } else {
4464                 sink = aconnector->dc_sink;
4465                 dc_sink_retain(sink);
4466         }
4467
4468         stream = dc_create_stream_for_sink(sink);
4469
4470         if (stream == NULL) {
4471                 DRM_ERROR("Failed to create stream for sink!\n");
4472                 goto finish;
4473         }
4474
4475         stream->dm_stream_context = aconnector;
4476
4477         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4478                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4479
4480         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4481                 /* Search for preferred mode */
4482                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4483                         native_mode_found = true;
4484                         break;
4485                 }
4486         }
4487         if (!native_mode_found)
4488                 preferred_mode = list_first_entry_or_null(
4489                                 &aconnector->base.modes,
4490                                 struct drm_display_mode,
4491                                 head);
4492
4493         mode_refresh = drm_mode_vrefresh(&mode);
4494
4495         if (preferred_mode == NULL) {
4496                 /*
4497                  * This may not be an error: the use case is when there are
4498                  * no usermode calls to reset and set the mode upon hotplug.
4499                  * In that case we set the mode ourselves to restore the
4500                  * previous mode, and the mode list may not be filled in yet.
4501                  */
4502                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4503         } else {
4504                 decide_crtc_timing_for_drm_display_mode(
4505                                 &mode, preferred_mode,
4506                                 scale);
4507                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4508         }
4509
4510         if (!dm_state)
4511                 drm_mode_set_crtcinfo(&mode, 0);
4512
4513         /*
4514          * If scaling is enabled and the refresh rate didn't change,
4515          * copy the VIC and sync polarities from the old timing.
4516          */
4517         if (!scale || mode_refresh != preferred_refresh)
4518                 fill_stream_properties_from_drm_display_mode(stream,
4519                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
4520         else
4521                 fill_stream_properties_from_drm_display_mode(stream,
4522                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4523
4524         stream->timing.flags.DSC = 0;
4525
4526         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4527 #if defined(CONFIG_DRM_AMD_DC_DCN)
4528                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4529                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4530                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4531                                       &dsc_caps);
4532 #endif
4533                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4534                                                              dc_link_get_link_cap(aconnector->dc_link));
4535
4536 #if defined(CONFIG_DRM_AMD_DC_DCN)
4537                 if (dsc_caps.is_dsc_supported)
4538                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4539                                                   &dsc_caps,
4540                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4541                                                   link_bandwidth_kbps,
4542                                                   &stream->timing,
4543                                                   &stream->timing.dsc_cfg))
4544                                 stream->timing.flags.DSC = 1;
4545 #endif
4546         }
4547
4548         update_stream_scaling_settings(&mode, dm_state, stream);
4549
4550         fill_audio_info(
4551                 &stream->audio_info,
4552                 drm_connector,
4553                 sink);
4554
4555         update_stream_signal(stream, sink);
4556
4557         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4558                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4559         if (stream->link->psr_settings.psr_feature_enabled) {
4560                 struct dc *core_dc = stream->link->ctx->dc;
4561
4562                 if (dc_is_dmcu_initialized(core_dc)) {
4563                         /*
4564                          * Decide whether the stream supports VSC SDP
4565                          * colorimetry before building the VSC info packet.
4566                          */
4567                         stream->use_vsc_sdp_for_colorimetry = false;
4568                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4569                                 stream->use_vsc_sdp_for_colorimetry =
4570                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4571                         } else {
4572                                 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4573                                         stream->use_vsc_sdp_for_colorimetry = true;
4574                         }
4575                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4576                 }
4577         }
4578 finish:
4579         dc_sink_release(sink);
4580
4581         return stream;
4582 }
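
/*
 * Editor's illustrative sketch (not part of the driver): as a rough check on
 * the DSC decision above, an 8b/10b DP link carries
 * line_rate * lanes * 8/10 of payload. HBR2 (5.4 Gbps) over four lanes gives
 * 17,280,000 kbps, which is the kind of figure the stream's bandwidth is
 * weighed against before dc_dsc_compute_config() enables DSC.
 */
static unsigned int sketch_dp_link_kbps(unsigned int line_rate_mbps,
					unsigned int lane_count)
{
	/* 5400 Mbps * 4 lanes * 8/10 coding * 1000 = 17280000 kbps */
	return line_rate_mbps * lane_count * 8 / 10 * 1000;
}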
4583
4584 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4585 {
4586         drm_crtc_cleanup(crtc);
4587         kfree(crtc);
4588 }
4589
4590 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4591                                   struct drm_crtc_state *state)
4592 {
4593         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4594
4595         /* TODO: Destroy dc_stream objects when the stream object is flattened */
4596         if (cur->stream)
4597                 dc_stream_release(cur->stream);
4598
4599         __drm_atomic_helper_crtc_destroy_state(state);
4600
4601         kfree(state);
4604 }
4605
4606 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4607 {
4608         struct dm_crtc_state *state;
4609
4610         if (crtc->state)
4611                 dm_crtc_destroy_state(crtc, crtc->state);
4612
4613         state = kzalloc(sizeof(*state), GFP_KERNEL);
4614         if (WARN_ON(!state))
4615                 return;
4616
4617         __drm_atomic_helper_crtc_reset(crtc, &state->base);
4618 }
4619
4620 static struct drm_crtc_state *
4621 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4622 {
4623         struct dm_crtc_state *state, *cur;
4624
4625         if (WARN_ON(!crtc->state))
4626                 return NULL;
4627
4628         cur = to_dm_crtc_state(crtc->state);
4629
4630         state = kzalloc(sizeof(*state), GFP_KERNEL);
4631         if (!state)
4632                 return NULL;
4633
4634         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4635
4636         if (cur->stream) {
4637                 state->stream = cur->stream;
4638                 dc_stream_retain(state->stream);
4639         }
4640
4641         state->active_planes = cur->active_planes;
4642         state->interrupts_enabled = cur->interrupts_enabled;
4643         state->vrr_params = cur->vrr_params;
4644         state->vrr_infopacket = cur->vrr_infopacket;
4645         state->abm_level = cur->abm_level;
4646         state->vrr_supported = cur->vrr_supported;
4647         state->freesync_config = cur->freesync_config;
4648         state->crc_src = cur->crc_src;
4649         state->cm_has_degamma = cur->cm_has_degamma;
4650         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4651
4652         /* TODO: Duplicate the dc_stream after the stream object is flattened */
4653
4654         return &state->base;
4655 }
4656
4657 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4658 {
4659         enum dc_irq_source irq_source;
4660         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4661         struct amdgpu_device *adev = crtc->dev->dev_private;
4662         int rc;
4663
4664         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4665
4666         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4667
4668         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4669                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4670         return rc;
4671 }
4672
4673 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4674 {
4675         enum dc_irq_source irq_source;
4676         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4677         struct amdgpu_device *adev = crtc->dev->dev_private;
4678         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4679         int rc = 0;
4680
4681         if (enable) {
4682                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4683                 if (amdgpu_dm_vrr_active(acrtc_state))
4684                         rc = dm_set_vupdate_irq(crtc, true);
4685         } else {
4686                 /* vblank irq off -> vupdate irq off */
4687                 rc = dm_set_vupdate_irq(crtc, false);
4688         }
4689
4690         if (rc)
4691                 return rc;
4692
4693         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4694         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4695 }
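
/*
 * Editor's illustrative sketch (not part of the driver): dm_set_vblank()
 * ties the VUPDATE interrupt to VBLANK, but only keeps VUPDATE armed while
 * VRR is active; turning VBLANK off always drops VUPDATE too. The policy as
 * a pure predicate:
 */
static int sketch_want_vupdate(int vblank_enabled, int vrr_active)
{
	return vblank_enabled && vrr_active;
}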
4696
4697 static int dm_enable_vblank(struct drm_crtc *crtc)
4698 {
4699         return dm_set_vblank(crtc, true);
4700 }
4701
4702 static void dm_disable_vblank(struct drm_crtc *crtc)
4703 {
4704         dm_set_vblank(crtc, false);
4705 }
4706
4707 /* Only the options currently available to the driver are implemented */
4708 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4709         .reset = dm_crtc_reset_state,
4710         .destroy = amdgpu_dm_crtc_destroy,
4711         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4712         .set_config = drm_atomic_helper_set_config,
4713         .page_flip = drm_atomic_helper_page_flip,
4714         .atomic_duplicate_state = dm_crtc_duplicate_state,
4715         .atomic_destroy_state = dm_crtc_destroy_state,
4716         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4717         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4718         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4719         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4720         .enable_vblank = dm_enable_vblank,
4721         .disable_vblank = dm_disable_vblank,
4722         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4723 };
4724
4725 static enum drm_connector_status
4726 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4727 {
4728         bool connected;
4729         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4730
4731         /*
4732          * Notes:
4733          * 1. This interface is NOT called in context of HPD irq.
4734          * 2. This interface *is called* in context of user-mode ioctl. Which
4735          * makes it a bad place for *any* MST-related activity.
4736          */
4737
4738         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4739             !aconnector->fake_enable)
4740                 connected = (aconnector->dc_sink != NULL);
4741         else
4742                 connected = (aconnector->base.force == DRM_FORCE_ON);
4743
4744         return (connected ? connector_status_connected :
4745                         connector_status_disconnected);
4746 }
4747
4748 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4749                                             struct drm_connector_state *connector_state,
4750                                             struct drm_property *property,
4751                                             uint64_t val)
4752 {
4753         struct drm_device *dev = connector->dev;
4754         struct amdgpu_device *adev = dev->dev_private;
4755         struct dm_connector_state *dm_old_state =
4756                 to_dm_connector_state(connector->state);
4757         struct dm_connector_state *dm_new_state =
4758                 to_dm_connector_state(connector_state);
4759
4760         int ret = -EINVAL;
4761
4762         if (property == dev->mode_config.scaling_mode_property) {
4763                 enum amdgpu_rmx_type rmx_type;
4764
4765                 switch (val) {
4766                 case DRM_MODE_SCALE_CENTER:
4767                         rmx_type = RMX_CENTER;
4768                         break;
4769                 case DRM_MODE_SCALE_ASPECT:
4770                         rmx_type = RMX_ASPECT;
4771                         break;
4772                 case DRM_MODE_SCALE_FULLSCREEN:
4773                         rmx_type = RMX_FULL;
4774                         break;
4775                 case DRM_MODE_SCALE_NONE:
4776                 default:
4777                         rmx_type = RMX_OFF;
4778                         break;
4779                 }
4780
4781                 if (dm_old_state->scaling == rmx_type)
4782                         return 0;
4783
4784                 dm_new_state->scaling = rmx_type;
4785                 ret = 0;
4786         } else if (property == adev->mode_info.underscan_hborder_property) {
4787                 dm_new_state->underscan_hborder = val;
4788                 ret = 0;
4789         } else if (property == adev->mode_info.underscan_vborder_property) {
4790                 dm_new_state->underscan_vborder = val;
4791                 ret = 0;
4792         } else if (property == adev->mode_info.underscan_property) {
4793                 dm_new_state->underscan_enable = val;
4794                 ret = 0;
4795         } else if (property == adev->mode_info.abm_level_property) {
4796                 dm_new_state->abm_level = val;
4797                 ret = 0;
4798         }
4799
4800         return ret;
4801 }
4802
4803 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4804                                             const struct drm_connector_state *state,
4805                                             struct drm_property *property,
4806                                             uint64_t *val)
4807 {
4808         struct drm_device *dev = connector->dev;
4809         struct amdgpu_device *adev = dev->dev_private;
4810         struct dm_connector_state *dm_state =
4811                 to_dm_connector_state(state);
4812         int ret = -EINVAL;
4813
4814         if (property == dev->mode_config.scaling_mode_property) {
4815                 switch (dm_state->scaling) {
4816                 case RMX_CENTER:
4817                         *val = DRM_MODE_SCALE_CENTER;
4818                         break;
4819                 case RMX_ASPECT:
4820                         *val = DRM_MODE_SCALE_ASPECT;
4821                         break;
4822                 case RMX_FULL:
4823                         *val = DRM_MODE_SCALE_FULLSCREEN;
4824                         break;
4825                 case RMX_OFF:
4826                 default:
4827                         *val = DRM_MODE_SCALE_NONE;
4828                         break;
4829                 }
4830                 ret = 0;
4831         } else if (property == adev->mode_info.underscan_hborder_property) {
4832                 *val = dm_state->underscan_hborder;
4833                 ret = 0;
4834         } else if (property == adev->mode_info.underscan_vborder_property) {
4835                 *val = dm_state->underscan_vborder;
4836                 ret = 0;
4837         } else if (property == adev->mode_info.underscan_property) {
4838                 *val = dm_state->underscan_enable;
4839                 ret = 0;
4840         } else if (property == adev->mode_info.abm_level_property) {
4841                 *val = dm_state->abm_level;
4842                 ret = 0;
4843         }
4844
4845         return ret;
4846 }
4847
4848 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4849 {
4850         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4851
4852         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4853 }
4854
4855 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4856 {
4857         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4858         const struct dc_link *link = aconnector->dc_link;
4859         struct amdgpu_device *adev = connector->dev->dev_private;
4860         struct amdgpu_display_manager *dm = &adev->dm;
4861
4862 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4863         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4864
4865         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4866             link->type != dc_connection_none &&
4867             dm->backlight_dev) {
4868                 backlight_device_unregister(dm->backlight_dev);
4869                 dm->backlight_dev = NULL;
4870         }
4871 #endif
4872
4873         if (aconnector->dc_em_sink)
4874                 dc_sink_release(aconnector->dc_em_sink);
4875         aconnector->dc_em_sink = NULL;
4876         if (aconnector->dc_sink)
4877                 dc_sink_release(aconnector->dc_sink);
4878         aconnector->dc_sink = NULL;
4879
4880         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4881         drm_connector_unregister(connector);
4882         drm_connector_cleanup(connector);
4883         if (aconnector->i2c) {
4884                 i2c_del_adapter(&aconnector->i2c->base);
4885                 kfree(aconnector->i2c);
4886         }
4887         kfree(aconnector->dm_dp_aux.aux.name);
4888
4889         kfree(connector);
4890 }
4891
4892 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4893 {
4894         struct dm_connector_state *state =
4895                 to_dm_connector_state(connector->state);
4896
4897         if (connector->state)
4898                 __drm_atomic_helper_connector_destroy_state(connector->state);
4899
4900         kfree(state);
4901
4902         state = kzalloc(sizeof(*state), GFP_KERNEL);
4903
4904         if (state) {
4905                 state->scaling = RMX_OFF;
4906                 state->underscan_enable = false;
4907                 state->underscan_hborder = 0;
4908                 state->underscan_vborder = 0;
4909                 state->base.max_requested_bpc = 8;
4910                 state->vcpi_slots = 0;
4911                 state->pbn = 0;
4912                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4913                         state->abm_level = amdgpu_dm_abm_level;
4914
4915                 __drm_atomic_helper_connector_reset(connector, &state->base);
4916         }
4917 }
4918
4919 struct drm_connector_state *
4920 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4921 {
4922         struct dm_connector_state *state =
4923                 to_dm_connector_state(connector->state);
4924
4925         struct dm_connector_state *new_state =
4926                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4927
4928         if (!new_state)
4929                 return NULL;
4930
4931         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4932
4933         new_state->freesync_capable = state->freesync_capable;
4934         new_state->abm_level = state->abm_level;
4935         new_state->scaling = state->scaling;
4936         new_state->underscan_enable = state->underscan_enable;
4937         new_state->underscan_hborder = state->underscan_hborder;
4938         new_state->underscan_vborder = state->underscan_vborder;
4939         new_state->vcpi_slots = state->vcpi_slots;
4940         new_state->pbn = state->pbn;
4941         return &new_state->base;
4942 }
4943
4944 static int
4945 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4946 {
4947         struct amdgpu_dm_connector *amdgpu_dm_connector =
4948                 to_amdgpu_dm_connector(connector);
4949         int r;
4950
4951         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4952             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4953                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4954                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4955                 if (r)
4956                         return r;
4957         }
4958
4959 #if defined(CONFIG_DEBUG_FS)
4960         connector_debugfs_init(amdgpu_dm_connector);
4961 #endif
4962
4963         return 0;
4964 }
4965
4966 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4967         .reset = amdgpu_dm_connector_funcs_reset,
4968         .detect = amdgpu_dm_connector_detect,
4969         .fill_modes = drm_helper_probe_single_connector_modes,
4970         .destroy = amdgpu_dm_connector_destroy,
4971         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4972         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4973         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4974         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4975         .late_register = amdgpu_dm_connector_late_register,
4976         .early_unregister = amdgpu_dm_connector_unregister
4977 };
4978
4979 static int get_modes(struct drm_connector *connector)
4980 {
4981         return amdgpu_dm_connector_get_modes(connector);
4982 }
4983
4984 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4985 {
4986         struct dc_sink_init_data init_params = {
4987                         .link = aconnector->dc_link,
4988                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4989         };
4990         struct edid *edid;
4991
4992         if (!aconnector->base.edid_blob_ptr) {
4993                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4994                                 aconnector->base.name);
4995
4996                 aconnector->base.force = DRM_FORCE_OFF;
4997                 aconnector->base.override_edid = false;
4998                 return;
4999         }
5000
5001         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5002
5003         aconnector->edid = edid;
5004
5005         aconnector->dc_em_sink = dc_link_add_remote_sink(
5006                 aconnector->dc_link,
5007                 (uint8_t *)edid,
5008                 (edid->extensions + 1) * EDID_LENGTH,
5009                 &init_params);
5010
5011         if (aconnector->base.force == DRM_FORCE_ON) {
5012                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5013                 aconnector->dc_link->local_sink :
5014                 aconnector->dc_em_sink;
5015                 dc_sink_retain(aconnector->dc_sink);
5016         }
5017 }
5018
5019 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5020 {
5021         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5022
5023         /*
5024          * For a headless boot with a force-enabled DP connector, these
5025          * link settings must be non-zero to get an initial modeset.
5026          */
5027         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5028                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5029                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5030         }
5031
5033         aconnector->base.override_edid = true;
5034         create_eml_sink(aconnector);
5035 }
5036
5037 static struct dc_stream_state *
5038 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5039                                 const struct drm_display_mode *drm_mode,
5040                                 const struct dm_connector_state *dm_state,
5041                                 const struct dc_stream_state *old_stream)
5042 {
5043         struct drm_connector *connector = &aconnector->base;
5044         struct amdgpu_device *adev = connector->dev->dev_private;
5045         struct dc_stream_state *stream;
5046         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5047         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5048         enum dc_status dc_result = DC_OK;
5049
5050         do {
5051                 stream = create_stream_for_sink(aconnector, drm_mode,
5052                                                 dm_state, old_stream,
5053                                                 requested_bpc);
5054                 if (stream == NULL) {
5055                         DRM_ERROR("Failed to create stream for sink!\n");
5056                         break;
5057                 }
5058
5059                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5060
5061                 if (dc_result != DC_OK) {
5062                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5063                                       drm_mode->hdisplay,
5064                                       drm_mode->vdisplay,
5065                                       drm_mode->clock,
5066                                       dc_result,
5067                                       dc_status_to_str(dc_result));
5068
5069                         dc_stream_release(stream);
5070                         stream = NULL;
5071                         requested_bpc -= 2; /* lower bpc to retry validation */
5072                 }
5073
5074         } while (stream == NULL && requested_bpc >= 6);
5075
5076         return stream;
5077 }
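
/*
 * Editor's illustrative sketch (not part of the driver): the loop above
 * retries validation at successively lower colour depths, stepping the
 * requested bpc down by 2 until it would drop below 6 (so 10 -> 8 -> 6).
 * The same fallback shape, with the validate callback standing in for
 * dc_validate_stream():
 */
static int sketch_validate_with_fallback(int max_bpc, int (*validate)(int bpc))
{
	int bpc;

	for (bpc = max_bpc; bpc >= 6; bpc -= 2)
		if (validate(bpc))
			return bpc;	/* first depth that validates */
	return -1;			/* nothing validated down to 6 bpc */
}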
5078
5079 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5080                                    struct drm_display_mode *mode)
5081 {
5082         int result = MODE_ERROR;
5083         struct dc_sink *dc_sink;
5084         /* TODO: Unhardcode stream count */
5085         struct dc_stream_state *stream;
5086         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5087
5088         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5089                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5090                 return result;
5091
5092         /*
5093          * Only run this the first time mode_valid is called, to initialize
5094          * EDID management
5095          */
5096         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5097                 !aconnector->dc_em_sink)
5098                 handle_edid_mgmt(aconnector);
5099
5100         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5101
5102         if (dc_sink == NULL) {
5103                 DRM_ERROR("dc_sink is NULL!\n");
5104                 goto fail;
5105         }
5106
5107         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5108         if (stream) {
5109                 dc_stream_release(stream);
5110                 result = MODE_OK;
5111         }
5112
5113 fail:
5114         /* TODO: error handling */
5115         return result;
5116 }
5117
5118 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5119                                 struct dc_info_packet *out)
5120 {
5121         struct hdmi_drm_infoframe frame;
5122         unsigned char buf[30]; /* 26 + 4 */
5123         ssize_t len;
5124         int ret, i;
5125
5126         memset(out, 0, sizeof(*out));
5127
5128         if (!state->hdr_output_metadata)
5129                 return 0;
5130
5131         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5132         if (ret)
5133                 return ret;
5134
5135         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5136         if (len < 0)
5137                 return (int)len;
5138
5139         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5140         if (len != 30)
5141                 return -EINVAL;
5142
5143         /* Prepare the infopacket for DC. */
5144         switch (state->connector->connector_type) {
5145         case DRM_MODE_CONNECTOR_HDMIA:
5146                 out->hb0 = 0x87; /* type */
5147                 out->hb1 = 0x01; /* version */
5148                 out->hb2 = 0x1A; /* length */
5149                 out->sb[0] = buf[3]; /* checksum */
5150                 i = 1;
5151                 break;
5152
5153         case DRM_MODE_CONNECTOR_DisplayPort:
5154         case DRM_MODE_CONNECTOR_eDP:
5155                 out->hb0 = 0x00; /* sdp id, zero */
5156                 out->hb1 = 0x87; /* type */
5157                 out->hb2 = 0x1D; /* payload len - 1 */
5158                 out->hb3 = (0x13 << 2); /* sdp version */
5159                 out->sb[0] = 0x01; /* version */
5160                 out->sb[1] = 0x1A; /* length */
5161                 i = 2;
5162                 break;
5163
5164         default:
5165                 return -EINVAL;
5166         }
5167
5168         memcpy(&out->sb[i], &buf[4], 26);
5169         out->valid = true;
5170
5171         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5172                        sizeof(out->sb), false);
5173
5174         return 0;
5175 }
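
/*
 * Editor's illustrative sketch (not part of the driver): the 30 packed bytes
 * above are a 4-byte infoframe header (type 0x87, version, length 0x1A,
 * checksum) followed by the fixed 26-byte static-metadata payload. HDMI
 * keeps the checksum in sb[0] with the payload at sb[1]; DP wraps the same
 * payload in an SDP, so it starts at sb[2] instead.
 */
static void sketch_copy_hdr_payload(unsigned char *sb, int payload_offset,
				    const unsigned char *packed /* 30 bytes */)
{
	int i;

	/* packed[0..3] is the header; packed[4..29] is the payload */
	for (i = 0; i < 26; i++)
		sb[payload_offset + i] = packed[4 + i];
}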
5176
5177 static bool
5178 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5179                           const struct drm_connector_state *new_state)
5180 {
5181         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5182         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5183
5184         if (old_blob != new_blob) {
5185                 if (old_blob && new_blob &&
5186                     old_blob->length == new_blob->length)
5187                         return memcmp(old_blob->data, new_blob->data,
5188                                       old_blob->length);
5189
5190                 return true;
5191         }
5192
5193         return false;
5194 }
5195
5196 static int
5197 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5198                                  struct drm_atomic_state *state)
5199 {
5200         struct drm_connector_state *new_con_state =
5201                 drm_atomic_get_new_connector_state(state, conn);
5202         struct drm_connector_state *old_con_state =
5203                 drm_atomic_get_old_connector_state(state, conn);
5204         struct drm_crtc *crtc = new_con_state->crtc;
5205         struct drm_crtc_state *new_crtc_state;
5206         int ret;
5207
5208         if (!crtc)
5209                 return 0;
5210
5211         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5212                 struct dc_info_packet hdr_infopacket;
5213
5214                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5215                 if (ret)
5216                         return ret;
5217
5218                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5219                 if (IS_ERR(new_crtc_state))
5220                         return PTR_ERR(new_crtc_state);
5221
5222                 /*
5223                  * DC considers the stream backends changed if the
5224                  * static metadata changes. Forcing the modeset also
5225                  * gives a simple way for userspace to switch from
5226                  * 8bpc to 10bpc when setting the metadata to enter
5227                  * or exit HDR.
5228                  *
5229                  * Changing the static metadata after it's been
5230                  * set is permissible, however. So only force a
5231                  * modeset if we're entering or exiting HDR.
5232                  */
5233                 new_crtc_state->mode_changed =
5234                         !old_con_state->hdr_output_metadata ||
5235                         !new_con_state->hdr_output_metadata;
5236         }
5237
5238         return 0;
5239 }
5240
5241 static const struct drm_connector_helper_funcs
5242 amdgpu_dm_connector_helper_funcs = {
5243         /*
5244          * When hotplugging a second, larger display in fbcon mode, the
5245          * higher-resolution modes are filtered out by drm_mode_validate_size()
5246          * and go missing once the user starts lightdm. We therefore need to
5247          * renew the mode list in the get_modes callback, not just return the
5248          * mode count.
5248          */
5249         .get_modes = get_modes,
5250         .mode_valid = amdgpu_dm_connector_mode_valid,
5251         .atomic_check = amdgpu_dm_connector_atomic_check,
5252 };
5253
5254 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5255 {
5256 }
5257
5258 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5259 {
5260         struct drm_device *dev = new_crtc_state->crtc->dev;
5261         struct drm_plane *plane;
5262
5263         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5264                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5265                         return true;
5266         }
5267
5268         return false;
5269 }
5270
5271 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5272 {
5273         struct drm_atomic_state *state = new_crtc_state->state;
5274         struct drm_plane *plane;
5275         int num_active = 0;
5276
5277         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5278                 struct drm_plane_state *new_plane_state;
5279
5280                 /* Cursor planes are "fake". */
5281                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5282                         continue;
5283
5284                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5285
5286                 if (!new_plane_state) {
5287                         /*
5288                          * The plane is enabled on the CRTC and hasn't changed
5289                          * state. This means that it previously passed
5290                          * validation and is therefore enabled.
5291                          */
5292                         num_active += 1;
5293                         continue;
5294                 }
5295
5296                 /* We need a framebuffer to be considered enabled. */
5297                 num_active += (new_plane_state->fb != NULL);
5298         }
5299
5300         return num_active;
5301 }
5302
5303 /*
5304  * Sets whether interrupts should be enabled on a specific CRTC.
5305  * We require that the stream be enabled and that there exist active
5306  * DC planes on the stream.
5307  */
5308 static void
5309 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5310                                struct drm_crtc_state *new_crtc_state)
5311 {
5312         struct dm_crtc_state *dm_new_crtc_state =
5313                 to_dm_crtc_state(new_crtc_state);
5314
5315         dm_new_crtc_state->active_planes = 0;
5316         dm_new_crtc_state->interrupts_enabled = false;
5317
5318         if (!dm_new_crtc_state->stream)
5319                 return;
5320
5321         dm_new_crtc_state->active_planes =
5322                 count_crtc_active_planes(new_crtc_state);
5323
5324         dm_new_crtc_state->interrupts_enabled =
5325                 dm_new_crtc_state->active_planes > 0;
5326 }
5327
5328 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5329                                        struct drm_crtc_state *state)
5330 {
5331         struct amdgpu_device *adev = crtc->dev->dev_private;
5332         struct dc *dc = adev->dm.dc;
5333         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5334         int ret = -EINVAL;
5335
5336         /*
5337          * Update interrupt state for the CRTC. This needs to happen whenever
5338          * the CRTC has changed or whenever any of its planes have changed.
5339          * Atomic check satisfies both of these requirements since the CRTC
5340          * is added to the state by DRM during drm_atomic_helper_check_planes.
5341          */
5342         dm_update_crtc_interrupt_state(crtc, state);
5343
5344         if (unlikely(!dm_crtc_state->stream &&
5345                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5346                 WARN_ON(1);
5347                 return ret;
5348         }
5349
5350         /* In some use cases, like reset, no stream is attached */
5351         if (!dm_crtc_state->stream)
5352                 return 0;
5353
5354         /*
5355          * We want at least one hardware plane enabled to use
5356          * the stream with a cursor enabled.
5357          */
5358         if (state->enable && state->active &&
5359             does_crtc_have_active_cursor(state) &&
5360             dm_crtc_state->active_planes == 0)
5361                 return -EINVAL;
5362
5363         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5364                 return 0;
5365
5366         return ret;
5367 }
5368
5369 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5370                                       const struct drm_display_mode *mode,
5371                                       struct drm_display_mode *adjusted_mode)
5372 {
5373         return true;
5374 }
5375
5376 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5377         .disable = dm_crtc_helper_disable,
5378         .atomic_check = dm_crtc_helper_atomic_check,
5379         .mode_fixup = dm_crtc_helper_mode_fixup,
5380         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5381 };
5382
5383 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5384 {
5385
5386 }
5387
5388 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5389 {
5390         switch (display_color_depth) {
5391         case COLOR_DEPTH_666:
5392                 return 6;
5393         case COLOR_DEPTH_888:
5394                 return 8;
5395         case COLOR_DEPTH_101010:
5396                 return 10;
5397         case COLOR_DEPTH_121212:
5398                 return 12;
5399         case COLOR_DEPTH_141414:
5400                 return 14;
5401         case COLOR_DEPTH_161616:
5402                 return 16;
5403         default:
5404                 break;
5405         }
5406         return 0;
5407 }
5408
5409 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5410                                           struct drm_crtc_state *crtc_state,
5411                                           struct drm_connector_state *conn_state)
5412 {
5413         struct drm_atomic_state *state = crtc_state->state;
5414         struct drm_connector *connector = conn_state->connector;
5415         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5416         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5417         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5418         struct drm_dp_mst_topology_mgr *mst_mgr;
5419         struct drm_dp_mst_port *mst_port;
5420         enum dc_color_depth color_depth;
5421         int clock, bpp = 0;
5422         bool is_y420 = false;
5423
5424         if (!aconnector->port || !aconnector->dc_sink)
5425                 return 0;
5426
5427         mst_port = aconnector->port;
5428         mst_mgr = &aconnector->mst_port->mst_mgr;
5429
5430         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5431                 return 0;
5432
5433         if (!state->duplicated) {
5434                 int max_bpc = conn_state->max_requested_bpc;
5435                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5436                                 aconnector->force_yuv420_output;
5437                 color_depth = convert_color_depth_from_display_info(connector,
5438                                                                     is_y420,
5439                                                                     max_bpc);
5440                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5441                 clock = adjusted_mode->clock;
5442                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5443         }
5444         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5445                                                                            mst_mgr,
5446                                                                            mst_port,
5447                                                                            dm_new_connector_state->pbn,
5448                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
5449         if (dm_new_connector_state->vcpi_slots < 0) {
5450                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5451                 return dm_new_connector_state->vcpi_slots;
5452         }
5453         return 0;
5454 }
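
/*
 * Editor's illustrative sketch (not part of the driver): per the DP MST
 * spec, drm_dp_calc_pbn_mode() converts stream bandwidth into PBN units of
 * 54/64 MBps with a ~0.6% margin:
 * PBN = ceil(clock_khz * bpp / 8 * 64/54 * 1006/1000 / 1000). For 1080p60
 * (148500 kHz) at 24 bpp that works out to about 532 PBN.
 */
static long long sketch_pbn(long long clock_khz, long long bpp)
{
	long long num = clock_khz * bpp * 64 * 1006;
	long long den = 8LL * 54 * 1000 * 1000;

	return (num + den - 1) / den;	/* 148500 kHz, 24 bpp -> 532 */
}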
5455
5456 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5457         .disable = dm_encoder_helper_disable,
5458         .atomic_check = dm_encoder_helper_atomic_check
5459 };
5460
5461 #if defined(CONFIG_DRM_AMD_DC_DCN)
5462 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5463                                             struct dc_state *dc_state)
5464 {
5465         struct dc_stream_state *stream = NULL;
5466         struct drm_connector *connector;
5467         struct drm_connector_state *new_con_state, *old_con_state;
5468         struct amdgpu_dm_connector *aconnector;
5469         struct dm_connector_state *dm_conn_state;
5470         int i, j, clock, bpp;
5471         int vcpi, pbn_div, pbn = 0;
5472
5473         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5474
5475                 aconnector = to_amdgpu_dm_connector(connector);
5476
5477                 if (!aconnector->port)
5478                         continue;
5479
5480                 if (!new_con_state || !new_con_state->crtc)
5481                         continue;
5482
5483                 dm_conn_state = to_dm_connector_state(new_con_state);
5484
5485                 for (j = 0; j < dc_state->stream_count; j++) {
5486                         stream = dc_state->streams[j];
5487                         if (!stream)
5488                                 continue;
5489
5490                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5491                                 break;
5492
5493                         stream = NULL;
5494                 }
5495
5496                 if (!stream)
5497                         continue;
5498
5499                 if (stream->timing.flags.DSC != 1) {
5500                         drm_dp_mst_atomic_enable_dsc(state,
5501                                                      aconnector->port,
5502                                                      dm_conn_state->pbn,
5503                                                      0,
5504                                                      false);
5505                         continue;
5506                 }
5507
5508                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5509                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5510                 clock = stream->timing.pix_clk_100hz / 10;
5511                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5512                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5513                                                     aconnector->port,
5514                                                     pbn, pbn_div,
5515                                                     true);
5516                 if (vcpi < 0)
5517                         return vcpi;
5518
5519                 dm_conn_state->pbn = pbn;
5520                 dm_conn_state->vcpi_slots = vcpi;
5521         }
5522         return 0;
5523 }
5524 #endif
5525
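/*
 * Plane state management: dm_plane_state wraps the base DRM plane state and
 * carries a reference to the backing dc_plane_state, so duplicate/destroy
 * must retain/release that reference alongside the core DRM helpers.
 */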
5526 static void dm_drm_plane_reset(struct drm_plane *plane)
5527 {
5528         struct dm_plane_state *amdgpu_state = NULL;
5529
5530         if (plane->state)
5531                 plane->funcs->atomic_destroy_state(plane, plane->state);
5532
5533         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5534         WARN_ON(amdgpu_state == NULL);
5535
5536         if (amdgpu_state)
5537                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5538 }
5539
5540 static struct drm_plane_state *
5541 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5542 {
5543         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5544
5545         old_dm_plane_state = to_dm_plane_state(plane->state);
5546         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5547         if (!dm_plane_state)
5548                 return NULL;
5549
5550         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5551
5552         if (old_dm_plane_state->dc_state) {
5553                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5554                 dc_plane_state_retain(dm_plane_state->dc_state);
5555         }
5556
5557         return &dm_plane_state->base;
5558 }
5559
5560 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5561                                 struct drm_plane_state *state)
5562 {
5563         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5564
5565         if (dm_plane_state->dc_state)
5566                 dc_plane_state_release(dm_plane_state->dc_state);
5567
5568         drm_atomic_helper_plane_destroy_state(plane, state);
5569 }
5570
5571 static const struct drm_plane_funcs dm_plane_funcs = {
5572         .update_plane   = drm_atomic_helper_update_plane,
5573         .disable_plane  = drm_atomic_helper_disable_plane,
5574         .destroy        = drm_primary_helper_destroy,
5575         .reset = dm_drm_plane_reset,
5576         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5577         .atomic_destroy_state = dm_drm_plane_destroy_state,
5578 };
5579
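/*
 * prepare_fb: reserve and pin the framebuffer BO (display-capable domains
 * for regular planes, VRAM for cursors), bind it into GART, and cache the
 * GPU address plus tiling/TMZ attributes in the new dc_plane_state before
 * the commit programs the hardware.
 */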
5580 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5581                                       struct drm_plane_state *new_state)
5582 {
5583         struct amdgpu_framebuffer *afb;
5584         struct drm_gem_object *obj;
5585         struct amdgpu_device *adev;
5586         struct amdgpu_bo *rbo;
5587         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5588         struct list_head list;
5589         struct ttm_validate_buffer tv;
5590         struct ww_acquire_ctx ticket;
5591         uint64_t tiling_flags;
5592         uint32_t domain;
5593         int r;
5594         bool tmz_surface = false;
5595         bool force_disable_dcc = false;
5596
5597         dm_plane_state_old = to_dm_plane_state(plane->state);
5598         dm_plane_state_new = to_dm_plane_state(new_state);
5599
5600         if (!new_state->fb) {
5601                 DRM_DEBUG_DRIVER("No FB bound\n");
5602                 return 0;
5603         }
5604
5605         afb = to_amdgpu_framebuffer(new_state->fb);
5606         obj = new_state->fb->obj[0];
5607         rbo = gem_to_amdgpu_bo(obj);
5608         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5609         INIT_LIST_HEAD(&list);
5610
5611         tv.bo = &rbo->tbo;
5612         tv.num_shared = 1;
5613         list_add(&tv.head, &list);
5614
5615         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5616         if (r) {
5617                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5618                 return r;
5619         }
5620
5621         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5622                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5623         else
5624                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5625
5626         r = amdgpu_bo_pin(rbo, domain);
5627         if (unlikely(r != 0)) {
5628                 if (r != -ERESTARTSYS)
5629                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5630                 ttm_eu_backoff_reservation(&ticket, &list);
5631                 return r;
5632         }
5633
5634         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5635         if (unlikely(r != 0)) {
5636                 amdgpu_bo_unpin(rbo);
5637                 ttm_eu_backoff_reservation(&ticket, &list);
5638                 DRM_ERROR("%p: GART bind failed (%d)\n", rbo, r);
5639                 return r;
5640         }
5641
5642         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5643
5644         tmz_surface = amdgpu_bo_encrypted(rbo);
5645
5646         ttm_eu_backoff_reservation(&ticket, &list);
5647
5648         afb->address = amdgpu_bo_gpu_offset(rbo);
5649
5650         amdgpu_bo_ref(rbo);
5651
5652         if (dm_plane_state_new->dc_state &&
5653                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5654                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5655
5656                 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5657                 fill_plane_buffer_attributes(
5658                         adev, afb, plane_state->format, plane_state->rotation,
5659                         tiling_flags, &plane_state->tiling_info,
5660                         &plane_state->plane_size, &plane_state->dcc,
5661                         &plane_state->address, tmz_surface,
5662                         force_disable_dcc);
5663         }
5664
5665         return 0;
5666 }
5667
5668 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5669                                        struct drm_plane_state *old_state)
5670 {
5671         struct amdgpu_bo *rbo;
5672         int r;
5673
5674         if (!old_state->fb)
5675                 return;
5676
5677         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5678         r = amdgpu_bo_reserve(rbo, false);
5679         if (unlikely(r)) {
5680                 DRM_ERROR("failed to reserve rbo before unpin\n");
5681                 return;
5682         }
5683
5684         amdgpu_bo_unpin(rbo);
5685         amdgpu_bo_unreserve(rbo);
5686         amdgpu_bo_unref(&rbo);
5687 }
5688
5689 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5690                                        struct drm_crtc_state *new_crtc_state)
5691 {
5692         int max_downscale = 0;
5693         int max_upscale = INT_MAX;
5694
5695         /* TODO: These should be checked against DC plane caps */
5696         return drm_atomic_helper_check_plane_state(
5697                 state, new_crtc_state, max_downscale, max_upscale, true, true);
5698 }
5699
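/*
 * atomic_check: run the generic DRM plane/CRTC consistency checks, make
 * sure DC scaling info can be derived from the state, and finally let DC
 * validate the plane against the hardware capabilities.
 */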
5700 static int dm_plane_atomic_check(struct drm_plane *plane,
5701                                  struct drm_plane_state *state)
5702 {
5703         struct amdgpu_device *adev = plane->dev->dev_private;
5704         struct dc *dc = adev->dm.dc;
5705         struct dm_plane_state *dm_plane_state;
5706         struct dc_scaling_info scaling_info;
5707         struct drm_crtc_state *new_crtc_state;
5708         int ret;
5709
5710         dm_plane_state = to_dm_plane_state(state);
5711
5712         if (!dm_plane_state->dc_state)
5713                 return 0;
5714
5715         new_crtc_state =
5716                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5717         if (!new_crtc_state)
5718                 return -EINVAL;
5719
5720         ret = dm_plane_helper_check_state(state, new_crtc_state);
5721         if (ret)
5722                 return ret;
5723
5724         ret = fill_dc_scaling_info(state, &scaling_info);
5725         if (ret)
5726                 return ret;
5727
5728         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5729                 return 0;
5730
5731         return -EINVAL;
5732 }
5733
5734 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5735                                        struct drm_plane_state *new_plane_state)
5736 {
5737         /* Only support async updates on cursor planes. */
5738         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5739                 return -EINVAL;
5740
5741         return 0;
5742 }
5743
5744 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5745                                          struct drm_plane_state *new_state)
5746 {
5747         struct drm_plane_state *old_state =
5748                 drm_atomic_get_old_plane_state(new_state->state, plane);
5749
5750         swap(plane->state->fb, new_state->fb);
5751
5752         plane->state->src_x = new_state->src_x;
5753         plane->state->src_y = new_state->src_y;
5754         plane->state->src_w = new_state->src_w;
5755         plane->state->src_h = new_state->src_h;
5756         plane->state->crtc_x = new_state->crtc_x;
5757         plane->state->crtc_y = new_state->crtc_y;
5758         plane->state->crtc_w = new_state->crtc_w;
5759         plane->state->crtc_h = new_state->crtc_h;
5760
5761         handle_cursor_update(plane, old_state);
5762 }
5763
5764 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5765         .prepare_fb = dm_plane_helper_prepare_fb,
5766         .cleanup_fb = dm_plane_helper_cleanup_fb,
5767         .atomic_check = dm_plane_atomic_check,
5768         .atomic_async_check = dm_plane_atomic_async_check,
5769         .atomic_async_update = dm_plane_atomic_async_update
5770 };
5771
5772 /*
5773  * TODO: these are currently initialized to RGB formats only.
5774  * For future use cases we should either initialize them dynamically based
5775  * on plane capabilities, or initialize this array to all formats, so the
5776  * internal DRM check will succeed, and let DC implement the proper check.
5777  */
5778 static const uint32_t rgb_formats[] = {
5779         DRM_FORMAT_XRGB8888,
5780         DRM_FORMAT_ARGB8888,
5781         DRM_FORMAT_RGBA8888,
5782         DRM_FORMAT_XRGB2101010,
5783         DRM_FORMAT_XBGR2101010,
5784         DRM_FORMAT_ARGB2101010,
5785         DRM_FORMAT_ABGR2101010,
5786         DRM_FORMAT_XBGR8888,
5787         DRM_FORMAT_ABGR8888,
5788         DRM_FORMAT_RGB565,
5789 };
5790
5791 static const uint32_t overlay_formats[] = {
5792         DRM_FORMAT_XRGB8888,
5793         DRM_FORMAT_ARGB8888,
5794         DRM_FORMAT_RGBA8888,
5795         DRM_FORMAT_XBGR8888,
5796         DRM_FORMAT_ABGR8888,
5797         DRM_FORMAT_RGB565
5798 };
5799
5800 static const u32 cursor_formats[] = {
5801         DRM_FORMAT_ARGB8888
5802 };
5803
5804 static int get_plane_formats(const struct drm_plane *plane,
5805                              const struct dc_plane_cap *plane_cap,
5806                              uint32_t *formats, int max_formats)
5807 {
5808         int i, num_formats = 0;
5809
5810         /*
5811          * TODO: Query support for each group of formats directly from
5812          * DC plane caps. This will require adding more formats to the
5813          * caps list.
5814          */
5815
5816         switch (plane->type) {
5817         case DRM_PLANE_TYPE_PRIMARY:
5818                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5819                         if (num_formats >= max_formats)
5820                                 break;
5821
5822                         formats[num_formats++] = rgb_formats[i];
5823                 }
5824
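                /*
                 * The conditionally-added formats below bypass the
                 * max_formats bound used in the loop above; they fit
                 * because callers pass a 32-entry array and at most 16
                 * primary-plane formats are ever appended here.
                 */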
5825                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5826                         formats[num_formats++] = DRM_FORMAT_NV12;
5827                 if (plane_cap && plane_cap->pixel_format_support.p010)
5828                         formats[num_formats++] = DRM_FORMAT_P010;
5829                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5830                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5831                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5832                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5833                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5834                 }
5835                 break;
5836
5837         case DRM_PLANE_TYPE_OVERLAY:
5838                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5839                         if (num_formats >= max_formats)
5840                                 break;
5841
5842                         formats[num_formats++] = overlay_formats[i];
5843                 }
5844                 break;
5845
5846         case DRM_PLANE_TYPE_CURSOR:
5847                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5848                         if (num_formats >= max_formats)
5849                                 break;
5850
5851                         formats[num_formats++] = cursor_formats[i];
5852                 }
5853                 break;
5854         }
5855
5856         return num_formats;
5857 }
5858
5859 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5860                                 struct drm_plane *plane,
5861                                 unsigned long possible_crtcs,
5862                                 const struct dc_plane_cap *plane_cap)
5863 {
5864         uint32_t formats[32];
5865         int num_formats;
5866         int res;
5867
5868         num_formats = get_plane_formats(plane, plane_cap, formats,
5869                                         ARRAY_SIZE(formats));
5870
5871         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5872                                        &dm_plane_funcs, formats, num_formats,
5873                                        NULL, plane->type, NULL);
5874         if (res)
5875                 return res;
5876
5877         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5878             plane_cap && plane_cap->per_pixel_alpha) {
5879                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5880                                           BIT(DRM_MODE_BLEND_PREMULTI);
5881
5882                 drm_plane_create_alpha_property(plane);
5883                 drm_plane_create_blend_mode_property(plane, blend_caps);
5884         }
5885
5886         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5887             plane_cap &&
5888             (plane_cap->pixel_format_support.nv12 ||
5889              plane_cap->pixel_format_support.p010)) {
5890                 /* This only affects YUV formats. */
5891                 drm_plane_create_color_properties(
5892                         plane,
5893                         BIT(DRM_COLOR_YCBCR_BT601) |
5894                         BIT(DRM_COLOR_YCBCR_BT709) |
5895                         BIT(DRM_COLOR_YCBCR_BT2020),
5896                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5897                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5898                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5899         }
5900
5901         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5902
5903         /* Create (reset) the plane state */
5904         if (plane->funcs->reset)
5905                 plane->funcs->reset(plane);
5906
5907         return 0;
5908 }
5909
5910 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5911                                struct drm_plane *plane,
5912                                uint32_t crtc_index)
5913 {
5914         struct amdgpu_crtc *acrtc = NULL;
5915         struct drm_plane *cursor_plane;
5916
5917         int res = -ENOMEM;
5918
5919         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5920         if (!cursor_plane)
5921                 goto fail;
5922
5923         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5924         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;
5925
5926         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5927         if (!acrtc)
5928                 goto fail;
5929
5930         res = drm_crtc_init_with_planes(
5931                         dm->ddev,
5932                         &acrtc->base,
5933                         plane,
5934                         cursor_plane,
5935                         &amdgpu_dm_crtc_funcs, NULL);
5936
5937         if (res)
5938                 goto fail;
5939
5940         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5941
5942         /* Create (reset) the CRTC state */
5943         if (acrtc->base.funcs->reset)
5944                 acrtc->base.funcs->reset(&acrtc->base);
5945
5946         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5947         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5948
5949         acrtc->crtc_id = crtc_index;
5950         acrtc->base.enabled = false;
5951         acrtc->otg_inst = -1;
5952
5953         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5954         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5955                                    true, MAX_COLOR_LUT_ENTRIES);
5956         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5957
5958         return 0;
5959
5960 fail:
5961         kfree(acrtc);
5962         kfree(cursor_plane);
5963         return res;
5964 }
5965
5966
5967 static int to_drm_connector_type(enum signal_type st)
5968 {
5969         switch (st) {
5970         case SIGNAL_TYPE_HDMI_TYPE_A:
5971                 return DRM_MODE_CONNECTOR_HDMIA;
5972         case SIGNAL_TYPE_EDP:
5973                 return DRM_MODE_CONNECTOR_eDP;
5974         case SIGNAL_TYPE_LVDS:
5975                 return DRM_MODE_CONNECTOR_LVDS;
5976         case SIGNAL_TYPE_RGB:
5977                 return DRM_MODE_CONNECTOR_VGA;
5978         case SIGNAL_TYPE_DISPLAY_PORT:
5979         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5980                 return DRM_MODE_CONNECTOR_DisplayPort;
5981         case SIGNAL_TYPE_DVI_DUAL_LINK:
5982         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5983                 return DRM_MODE_CONNECTOR_DVID;
5984         case SIGNAL_TYPE_VIRTUAL:
5985                 return DRM_MODE_CONNECTOR_VIRTUAL;
5986
5987         default:
5988                 return DRM_MODE_CONNECTOR_Unknown;
5989         }
5990 }
5991
5992 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5993 {
5994         struct drm_encoder *encoder;
5995
5996         /* There is only one encoder per connector */
5997         drm_connector_for_each_possible_encoder(connector, encoder)
5998                 return encoder;
5999
6000         return NULL;
6001 }
6002
6003 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6004 {
6005         struct drm_encoder *encoder;
6006         struct amdgpu_encoder *amdgpu_encoder;
6007
6008         encoder = amdgpu_dm_connector_to_encoder(connector);
6009
6010         if (encoder == NULL)
6011                 return;
6012
6013         amdgpu_encoder = to_amdgpu_encoder(encoder);
6014
6015         amdgpu_encoder->native_mode.clock = 0;
6016
6017         if (!list_empty(&connector->probed_modes)) {
6018                 struct drm_display_mode *preferred_mode = NULL;
6019
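                /*
                 * probed_modes was sorted by drm_mode_sort(), so only the
                 * first entry needs to be checked for the preferred
                 * (native) mode; hence the unconditional break below.
                 */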
6020                 list_for_each_entry(preferred_mode,
6021                                     &connector->probed_modes,
6022                                     head) {
6023                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6024                                 amdgpu_encoder->native_mode = *preferred_mode;
6025
6026                         break;
6027                 }
6028
6029         }
6030 }
6031
6032 static struct drm_display_mode *
6033 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6034                              char *name,
6035                              int hdisplay, int vdisplay)
6036 {
6037         struct drm_device *dev = encoder->dev;
6038         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6039         struct drm_display_mode *mode = NULL;
6040         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6041
6042         mode = drm_mode_duplicate(dev, native_mode);
6043
6044         if (mode == NULL)
6045                 return NULL;
6046
6047         mode->hdisplay = hdisplay;
6048         mode->vdisplay = vdisplay;
6049         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6050         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6051
6052         return mode;
6053
6054 }
6055
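/*
 * Advertise a set of common modes at or below the native resolution so that
 * userspace can pick standard sizes even when the EDID only reports the
 * panel's native timing; modes already probed are skipped.
 */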
6056 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6057                                                  struct drm_connector *connector)
6058 {
6059         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6060         struct drm_display_mode *mode = NULL;
6061         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6062         struct amdgpu_dm_connector *amdgpu_dm_connector =
6063                                 to_amdgpu_dm_connector(connector);
6064         int i;
6065         int n;
6066         struct mode_size {
6067                 char name[DRM_DISPLAY_MODE_LEN];
6068                 int w;
6069                 int h;
6070         } common_modes[] = {
6071                 {  "640x480",  640,  480},
6072                 {  "800x600",  800,  600},
6073                 { "1024x768", 1024,  768},
6074                 { "1280x720", 1280,  720},
6075                 { "1280x800", 1280,  800},
6076                 {"1280x1024", 1280, 1024},
6077                 { "1440x900", 1440,  900},
6078                 {"1680x1050", 1680, 1050},
6079                 {"1600x1200", 1600, 1200},
6080                 {"1920x1080", 1920, 1080},
6081                 {"1920x1200", 1920, 1200}
6082         };
6083
6084         n = ARRAY_SIZE(common_modes);
6085
6086         for (i = 0; i < n; i++) {
6087                 struct drm_display_mode *curmode = NULL;
6088                 bool mode_existed = false;
6089
6090                 if (common_modes[i].w > native_mode->hdisplay ||
6091                     common_modes[i].h > native_mode->vdisplay ||
6092                    (common_modes[i].w == native_mode->hdisplay &&
6093                     common_modes[i].h == native_mode->vdisplay))
6094                         continue;
6095
6096                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6097                         if (common_modes[i].w == curmode->hdisplay &&
6098                             common_modes[i].h == curmode->vdisplay) {
6099                                 mode_existed = true;
6100                                 break;
6101                         }
6102                 }
6103
6104                 if (mode_existed)
6105                         continue;
6106
6107                 mode = amdgpu_dm_create_common_mode(encoder,
6108                                 common_modes[i].name, common_modes[i].w,
6109                                 common_modes[i].h);
                if (!mode)
                        continue;
6110                 drm_mode_probed_add(connector, mode);
6111                 amdgpu_dm_connector->num_modes++;
6112         }
6113 }
6114
6115 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6116                                               struct edid *edid)
6117 {
6118         struct amdgpu_dm_connector *amdgpu_dm_connector =
6119                         to_amdgpu_dm_connector(connector);
6120
6121         if (edid) {
6122                 /* empty probed_modes */
6123                 INIT_LIST_HEAD(&connector->probed_modes);
6124                 amdgpu_dm_connector->num_modes =
6125                                 drm_add_edid_modes(connector, edid);
6126
6127                 /* Sort the probed modes before calling
6128                  * amdgpu_dm_get_native_mode(), since an EDID can have
6129                  * more than one preferred mode. Modes later in the
6130                  * probed list may have a higher, preferred resolution:
6131                  * for example, a 3840x2160 preferred timing in the base
6132                  * EDID and a 4096x2160 preferred resolution in a later
6133                  * DID extension block.
6134                  */
6135                 drm_mode_sort(&connector->probed_modes);
6136                 amdgpu_dm_get_native_mode(connector);
6137         } else {
6138                 amdgpu_dm_connector->num_modes = 0;
6139         }
6140 }
6141
6142 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6143 {
6144         struct amdgpu_dm_connector *amdgpu_dm_connector =
6145                         to_amdgpu_dm_connector(connector);
6146         struct drm_encoder *encoder;
6147         struct edid *edid = amdgpu_dm_connector->edid;
6148
6149         encoder = amdgpu_dm_connector_to_encoder(connector);
6150
6151         if (!edid || !drm_edid_is_valid(edid)) {
6152                 amdgpu_dm_connector->num_modes =
6153                                 drm_add_modes_noedid(connector, 640, 480);
6154         } else {
6155                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6156                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6157         }
6158         amdgpu_dm_fbc_init(connector);
6159
6160         return amdgpu_dm_connector->num_modes;
6161 }
6162
6163 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6164                                      struct amdgpu_dm_connector *aconnector,
6165                                      int connector_type,
6166                                      struct dc_link *link,
6167                                      int link_index)
6168 {
6169         struct amdgpu_device *adev = dm->ddev->dev_private;
6170
6171         /*
6172          * Some of the properties below require access to state, like bpc.
6173          * Allocate some default initial connector state with our reset helper.
6174          */
6175         if (aconnector->base.funcs->reset)
6176                 aconnector->base.funcs->reset(&aconnector->base);
6177
6178         aconnector->connector_id = link_index;
6179         aconnector->dc_link = link;
6180         aconnector->base.interlace_allowed = false;
6181         aconnector->base.doublescan_allowed = false;
6182         aconnector->base.stereo_allowed = false;
6183         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6184         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6185         aconnector->audio_inst = -1;
6186         mutex_init(&aconnector->hpd_lock);
6187
6188         /*
6189          * Configure HPD (hot plug detect) support: connector->polled
6190          * defaults to 0, which means hot plug is not supported.
6191          */
6192         switch (connector_type) {
6193         case DRM_MODE_CONNECTOR_HDMIA:
6194                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6195                 aconnector->base.ycbcr_420_allowed =
6196                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6197                 break;
6198         case DRM_MODE_CONNECTOR_DisplayPort:
6199                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6200                 aconnector->base.ycbcr_420_allowed =
6201                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
6202                 break;
6203         case DRM_MODE_CONNECTOR_DVID:
6204                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6205                 break;
6206         default:
6207                 break;
6208         }
6209
6210         drm_object_attach_property(&aconnector->base.base,
6211                                 dm->ddev->mode_config.scaling_mode_property,
6212                                 DRM_MODE_SCALE_NONE);
6213
6214         drm_object_attach_property(&aconnector->base.base,
6215                                 adev->mode_info.underscan_property,
6216                                 UNDERSCAN_OFF);
6217         drm_object_attach_property(&aconnector->base.base,
6218                                 adev->mode_info.underscan_hborder_property,
6219                                 0);
6220         drm_object_attach_property(&aconnector->base.base,
6221                                 adev->mode_info.underscan_vborder_property,
6222                                 0);
6223
6224         if (!aconnector->mst_port)
6225                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6226
6227         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6228         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6229         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6230
6231         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6232             dc_is_dmcu_initialized(adev->dm.dc)) {
6233                 drm_object_attach_property(&aconnector->base.base,
6234                                 adev->mode_info.abm_level_property, 0);
6235         }
6236
6237         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6238             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6239             connector_type == DRM_MODE_CONNECTOR_eDP) {
6240                 drm_object_attach_property(
6241                         &aconnector->base.base,
6242                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
6243
6244                 if (!aconnector->mst_port)
6245                         drm_connector_attach_vrr_capable_property(&aconnector->base);
6246
6247 #ifdef CONFIG_DRM_AMD_DC_HDCP
6248                 if (adev->dm.hdcp_workqueue)
6249                         drm_connector_attach_content_protection_property(&aconnector->base, true);
6250 #endif
6251         }
6252 }
6253
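/*
 * I2C transfer over the DC DDC engine: translate the i2c_msg array into a
 * DC i2c_command and submit it on the link's DDC channel. Returns the
 * number of messages transferred on success, -EIO otherwise.
 */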
6254 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6255                               struct i2c_msg *msgs, int num)
6256 {
6257         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6258         struct ddc_service *ddc_service = i2c->ddc_service;
6259         struct i2c_command cmd;
6260         int i;
6261         int result = -EIO;
6262
6263         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6264
6265         if (!cmd.payloads)
6266                 return result;
6267
6268         cmd.number_of_payloads = num;
6269         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6270         cmd.speed = 100;
6271
6272         for (i = 0; i < num; i++) {
6273                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6274                 cmd.payloads[i].address = msgs[i].addr;
6275                 cmd.payloads[i].length = msgs[i].len;
6276                 cmd.payloads[i].data = msgs[i].buf;
6277         }
6278
6279         if (dc_submit_i2c(
6280                         ddc_service->ctx->dc,
6281                         ddc_service->ddc_pin->hw_info.ddc_channel,
6282                         &cmd))
6283                 result = num;
6284
6285         kfree(cmd.payloads);
6286         return result;
6287 }
6288
6289 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6290 {
6291         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6292 }
6293
6294 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6295         .master_xfer = amdgpu_dm_i2c_xfer,
6296         .functionality = amdgpu_dm_i2c_func,
6297 };
6298
6299 static struct amdgpu_i2c_adapter *
6300 create_i2c(struct ddc_service *ddc_service,
6301            int link_index,
6302            int *res)
6303 {
6304         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6305         struct amdgpu_i2c_adapter *i2c;
6306
6307         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6308         if (!i2c)
6309                 return NULL;
6310         i2c->base.owner = THIS_MODULE;
6311         i2c->base.class = I2C_CLASS_DDC;
6312         i2c->base.dev.parent = &adev->pdev->dev;
6313         i2c->base.algo = &amdgpu_dm_i2c_algo;
6314         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6315         i2c_set_adapdata(&i2c->base, i2c);
6316         i2c->ddc_service = ddc_service;
6317         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6318
6319         return i2c;
6320 }
6321
6322
6323 /*
6324  * Note: this function assumes that dc_link_detect() was called for the
6325  * dc_link which will be represented by this aconnector.
6326  */
6327 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6328                                     struct amdgpu_dm_connector *aconnector,
6329                                     uint32_t link_index,
6330                                     struct amdgpu_encoder *aencoder)
6331 {
6332         int res = 0;
6333         int connector_type;
6334         struct dc *dc = dm->dc;
6335         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6336         struct amdgpu_i2c_adapter *i2c;
6337
6338         link->priv = aconnector;
6339
6340         DRM_DEBUG_DRIVER("%s()\n", __func__);
6341
6342         i2c = create_i2c(link->ddc, link->link_index, &res);
6343         if (!i2c) {
6344                 DRM_ERROR("Failed to create i2c adapter data\n");
6345                 return -ENOMEM;
6346         }
6347
6348         aconnector->i2c = i2c;
6349         res = i2c_add_adapter(&i2c->base);
6350
6351         if (res) {
6352                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6353                 goto out_free;
6354         }
6355
6356         connector_type = to_drm_connector_type(link->connector_signal);
6357
6358         res = drm_connector_init_with_ddc(
6359                         dm->ddev,
6360                         &aconnector->base,
6361                         &amdgpu_dm_connector_funcs,
6362                         connector_type,
6363                         &i2c->base);
6364
6365         if (res) {
6366                 DRM_ERROR("connector_init failed\n");
6367                 aconnector->connector_id = -1;
6368                 goto out_free;
6369         }
6370
6371         drm_connector_helper_add(
6372                         &aconnector->base,
6373                         &amdgpu_dm_connector_helper_funcs);
6374
6375         amdgpu_dm_connector_init_helper(
6376                 dm,
6377                 aconnector,
6378                 connector_type,
6379                 link,
6380                 link_index);
6381
6382         drm_connector_attach_encoder(
6383                 &aconnector->base, &aencoder->base);
6384
6385         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6386                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6387                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6388
6389 out_free:
6390         if (res) {
6391                 kfree(i2c);
6392                 aconnector->i2c = NULL;
6393         }
6394         return res;
6395 }
6396
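/*
 * Any encoder can drive any CRTC, so possible_crtcs is simply a mask with
 * one bit set per CRTC (e.g. 4 CRTCs -> 0xf), capped at the 6 CRTCs the
 * hardware supports.
 */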
6397 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6398 {
6399         switch (adev->mode_info.num_crtc) {
6400         case 1:
6401                 return 0x1;
6402         case 2:
6403                 return 0x3;
6404         case 3:
6405                 return 0x7;
6406         case 4:
6407                 return 0xf;
6408         case 5:
6409                 return 0x1f;
6410         case 6:
6411         default:
6412                 return 0x3f;
6413         }
6414 }
6415
6416 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6417                                   struct amdgpu_encoder *aencoder,
6418                                   uint32_t link_index)
6419 {
6420         struct amdgpu_device *adev = dev->dev_private;
6421
6422         int res = drm_encoder_init(dev,
6423                                    &aencoder->base,
6424                                    &amdgpu_dm_encoder_funcs,
6425                                    DRM_MODE_ENCODER_TMDS,
6426                                    NULL);
6427
6428         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6429
6430         if (!res)
6431                 aencoder->encoder_id = link_index;
6432         else
6433                 aencoder->encoder_id = -1;
6434
6435         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6436
6437         return res;
6438 }
6439
6440 static void manage_dm_interrupts(struct amdgpu_device *adev,
6441                                  struct amdgpu_crtc *acrtc,
6442                                  bool enable)
6443 {
6444         /*
6445          * This is not the correct translation, but it works as long as the
6446          * VBLANK constant is the same as the PFLIP one.
6447          */
6448         int irq_type =
6449                 amdgpu_display_crtc_idx_to_irq_type(
6450                         adev,
6451                         acrtc->crtc_id);
6452
6453         if (enable) {
6454                 drm_crtc_vblank_on(&acrtc->base);
6455                 amdgpu_irq_get(
6456                         adev,
6457                         &adev->pageflip_irq,
6458                         irq_type);
6459         } else {
6460
6461                 amdgpu_irq_put(
6462                         adev,
6463                         &adev->pageflip_irq,
6464                         irq_type);
6465                 drm_crtc_vblank_off(&acrtc->base);
6466         }
6467 }
6468
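/*
 * Returns true if the scaling mode or the underscan borders changed in a
 * way that requires the stream to be updated.
 */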
6469 static bool
6470 is_scaling_state_different(const struct dm_connector_state *dm_state,
6471                            const struct dm_connector_state *old_dm_state)
6472 {
6473         if (dm_state->scaling != old_dm_state->scaling)
6474                 return true;
6475         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6476                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6477                         return true;
6478         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6479                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6480                         return true;
6481         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6482                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6483                 return true;
6484         return false;
6485 }
6486
6487 #ifdef CONFIG_DRM_AMD_DC_HDCP
6488 static bool is_content_protection_different(struct drm_connector_state *state,
6489                                             const struct drm_connector_state *old_state,
6490                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6491 {
6492         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6493
6494         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6495             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6496                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6497                 return true;
6498         }
6499
6500         /* Content protection is being re-enabled; ignore this. */
6501         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6502             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6503                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6504                 return false;
6505         }
6506
6507         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6508         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6509             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6510                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6511
6512         /* Check if something is connected or enabled; otherwise we would
6513          * start HDCP with nothing connected/enabled: hot-plug, headless S3, DPMS.
6514          */
6515         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6516             aconnector->dc_sink != NULL)
6517                 return true;
6518
6519         if (old_state->content_protection == state->content_protection)
6520                 return false;
6521
6522         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6523                 return true;
6524
6525         return false;
6526 }
6527
6528 #endif
6529 static void remove_stream(struct amdgpu_device *adev,
6530                           struct amdgpu_crtc *acrtc,
6531                           struct dc_stream_state *stream)
6532 {
6533         /* This is the stream-removal (update mode) case. */
6534
6535         acrtc->otg_inst = -1;
6536         acrtc->enabled = false;
6537 }
6538
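/*
 * Compute the DC cursor position from the plane state. Negative CRTC
 * coordinates are folded into the hotspot: e.g. crtc_x = -10 yields x = 0
 * with x_hotspot = 10, so the cursor is clipped at the screen edge instead
 * of being moved off it.
 */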
6539 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6540                                struct dc_cursor_position *position)
6541 {
6542         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6543         int x, y;
6544         int xorigin = 0, yorigin = 0;
6545
6546         position->enable = false;
6547         position->x = 0;
6548         position->y = 0;
6549
6550         if (!crtc || !plane->state->fb)
6551                 return 0;
6552
6553         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6554             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6555                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6556                           __func__,
6557                           plane->state->crtc_w,
6558                           plane->state->crtc_h);
6559                 return -EINVAL;
6560         }
6561
6562         x = plane->state->crtc_x;
6563         y = plane->state->crtc_y;
6564
6565         if (x <= -amdgpu_crtc->max_cursor_width ||
6566             y <= -amdgpu_crtc->max_cursor_height)
6567                 return 0;
6568
6569         if (x < 0) {
6570                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6571                 x = 0;
6572         }
6573         if (y < 0) {
6574                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6575                 y = 0;
6576         }
6577         position->enable = true;
6578         position->translate_by_source = true;
6579         position->x = x;
6580         position->y = y;
6581         position->x_hotspot = xorigin;
6582         position->y_hotspot = yorigin;
6583
6584         return 0;
6585 }
6586
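/*
 * Program the hardware cursor for the current plane state: position (with
 * hotspot) and, when a framebuffer is bound, the surface attributes
 * (address, size, pre-multiplied ARGB format). A disabled position simply
 * turns the cursor off.
 */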
6587 static void handle_cursor_update(struct drm_plane *plane,
6588                                  struct drm_plane_state *old_plane_state)
6589 {
6590         struct amdgpu_device *adev = plane->dev->dev_private;
6591         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6592         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6593         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6594         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6595         uint64_t address = afb ? afb->address : 0;
6596         struct dc_cursor_position position;
6597         struct dc_cursor_attributes attributes;
6598         int ret;
6599
6600         if (!plane->state->fb && !old_plane_state->fb)
6601                 return;
6602
6603         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6604                          __func__,
6605                          amdgpu_crtc->crtc_id,
6606                          plane->state->crtc_w,
6607                          plane->state->crtc_h);
6608
6609         ret = get_cursor_position(plane, crtc, &position);
6610         if (ret)
6611                 return;
6612
6613         if (!position.enable) {
6614                 /* turn off cursor */
6615                 if (crtc_state && crtc_state->stream) {
6616                         mutex_lock(&adev->dm.dc_lock);
6617                         dc_stream_set_cursor_position(crtc_state->stream,
6618                                                       &position);
6619                         mutex_unlock(&adev->dm.dc_lock);
6620                 }
6621                 return;
6622         }
6623
6624         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6625         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6626
6627         memset(&attributes, 0, sizeof(attributes));
6628         attributes.address.high_part = upper_32_bits(address);
6629         attributes.address.low_part  = lower_32_bits(address);
6630         attributes.width             = plane->state->crtc_w;
6631         attributes.height            = plane->state->crtc_h;
6632         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6633         attributes.rotation_angle    = 0;
6634         attributes.attribute_flags.value = 0;
6635
6636         attributes.pitch = attributes.width;
6637
6638         if (crtc_state->stream) {
6639                 mutex_lock(&adev->dm.dc_lock);
6640                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6641                                                          &attributes))
6642                         DRM_ERROR("DC failed to set cursor attributes\n");
6643
6644                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6645                                                    &position))
6646                         DRM_ERROR("DC failed to set cursor position\n");
6647                 mutex_unlock(&adev->dm.dc_lock);
6648         }
6649 }
6650
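/*
 * Called with the event_lock held: take over the pending pageflip event
 * from the CRTC state and mark the flip as submitted so the pflip IRQ
 * handler can complete it later.
 */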
6651 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6652 {
6653
6654         assert_spin_locked(&acrtc->base.dev->event_lock);
6655         WARN_ON(acrtc->event);
6656
6657         acrtc->event = acrtc->base.state->event;
6658
6659         /* Set the flip status */
6660         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6661
6662         /* Mark this event as consumed */
6663         acrtc->base.state->event = NULL;
6664
6665         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6666                                                  acrtc->crtc_id);
6667 }
6668
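/*
 * Per-flip FreeSync/VRR bookkeeping: feed the flip timestamp to the
 * freesync module, rebuild the VRR infopacket, and record whether the
 * timing adjustment or infopacket changed so the commit can push a stream
 * update. Runs under the event_lock spinlock.
 */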
6669 static void update_freesync_state_on_stream(
6670         struct amdgpu_display_manager *dm,
6671         struct dm_crtc_state *new_crtc_state,
6672         struct dc_stream_state *new_stream,
6673         struct dc_plane_state *surface,
6674         u32 flip_timestamp_in_us)
6675 {
6676         struct mod_vrr_params vrr_params;
6677         struct dc_info_packet vrr_infopacket = {0};
6678         struct amdgpu_device *adev = dm->adev;
6679         unsigned long flags;
6680
6681         if (!new_stream)
6682                 return;
6683
6684         /*
6685          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6686          * For now it's sufficient to just guard against these conditions.
6687          */
6688
6689         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6690                 return;
6691
6692         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6693         vrr_params = new_crtc_state->vrr_params;
6694
6695         if (surface) {
6696                 mod_freesync_handle_preflip(
6697                         dm->freesync_module,
6698                         surface,
6699                         new_stream,
6700                         flip_timestamp_in_us,
6701                         &vrr_params);
6702
6703                 if (adev->family < AMDGPU_FAMILY_AI &&
6704                     amdgpu_dm_vrr_active(new_crtc_state)) {
6705                         mod_freesync_handle_v_update(dm->freesync_module,
6706                                                      new_stream, &vrr_params);
6707
6708                         /* Need to call this before the frame ends. */
6709                         dc_stream_adjust_vmin_vmax(dm->dc,
6710                                                    new_crtc_state->stream,
6711                                                    &vrr_params.adjust);
6712                 }
6713         }
6714
6715         mod_freesync_build_vrr_infopacket(
6716                 dm->freesync_module,
6717                 new_stream,
6718                 &vrr_params,
6719                 PACKET_TYPE_VRR,
6720                 TRANSFER_FUNC_UNKNOWN,
6721                 &vrr_infopacket);
6722
6723         new_crtc_state->freesync_timing_changed |=
6724                 (memcmp(&new_crtc_state->vrr_params.adjust,
6725                         &vrr_params.adjust,
6726                         sizeof(vrr_params.adjust)) != 0);
6727
6728         new_crtc_state->freesync_vrr_info_changed |=
6729                 (memcmp(&new_crtc_state->vrr_infopacket,
6730                         &vrr_infopacket,
6731                         sizeof(vrr_infopacket)) != 0);
6732
6733         new_crtc_state->vrr_params = vrr_params;
6734         new_crtc_state->vrr_infopacket = vrr_infopacket;
6735
6736         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6737         new_stream->vrr_infopacket = vrr_infopacket;
6738
6739         if (new_crtc_state->freesync_vrr_info_changed)
6740                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6741                               new_crtc_state->base.crtc->base.id,
6742                               (int)new_crtc_state->base.vrr_enabled,
6743                               (int)vrr_params.state);
6744
6745         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6746 }
6747
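/*
 * Pre-commit counterpart of update_freesync_state_on_stream(): derive the
 * VRR state (active/inactive/unsupported) from the new CRTC state and
 * rebuild the mod_vrr_params before any flips are submitted.
 */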
6748 static void pre_update_freesync_state_on_stream(
6749         struct amdgpu_display_manager *dm,
6750         struct dm_crtc_state *new_crtc_state)
6751 {
6752         struct dc_stream_state *new_stream = new_crtc_state->stream;
6753         struct mod_vrr_params vrr_params;
6754         struct mod_freesync_config config = new_crtc_state->freesync_config;
6755         struct amdgpu_device *adev = dm->adev;
6756         unsigned long flags;
6757
6758         if (!new_stream)
6759                 return;
6760
6761         /*
6762          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6763          * For now it's sufficient to just guard against these conditions.
6764          */
6765         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6766                 return;
6767
6768         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6769         vrr_params = new_crtc_state->vrr_params;
6770
6771         if (new_crtc_state->vrr_supported &&
6772             config.min_refresh_in_uhz &&
6773             config.max_refresh_in_uhz) {
6774                 config.state = new_crtc_state->base.vrr_enabled ?
6775                         VRR_STATE_ACTIVE_VARIABLE :
6776                         VRR_STATE_INACTIVE;
6777         } else {
6778                 config.state = VRR_STATE_UNSUPPORTED;
6779         }
6780
6781         mod_freesync_build_vrr_params(dm->freesync_module,
6782                                       new_stream,
6783                                       &config, &vrr_params);
6784
6785         new_crtc_state->freesync_timing_changed |=
6786                 (memcmp(&new_crtc_state->vrr_params.adjust,
6787                         &vrr_params.adjust,
6788                         sizeof(vrr_params.adjust)) != 0);
6789
6790         new_crtc_state->vrr_params = vrr_params;
6791         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6792 }
6793
6794 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6795                                             struct dm_crtc_state *new_state)
6796 {
6797         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6798         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6799
6800         if (!old_vrr_active && new_vrr_active) {
6801                 /* Transition VRR inactive -> active:
6802                  * While VRR is active, we must not disable vblank irq, as a
6803                  * re-enable after a disable would compute bogus vblank/pflip
6804                  * timestamps if it happened inside the display front porch.
6805                  *
6806                  * We also need vupdate irq for the actual core vblank handling
6807                  * at end of vblank.
6808                  */
6809                 dm_set_vupdate_irq(new_state->base.crtc, true);
6810                 drm_crtc_vblank_get(new_state->base.crtc);
6811                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6812                                  __func__, new_state->base.crtc->base.id);
6813         } else if (old_vrr_active && !new_vrr_active) {
6814                 /* Transition VRR active -> inactive:
6815                  * Allow vblank irq disable again for fixed refresh rate.
6816                  */
6817                 dm_set_vupdate_irq(new_state->base.crtc, false);
6818                 drm_crtc_vblank_put(new_state->base.crtc);
6819                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6820                                  __func__, new_state->base.crtc->base.id);
6821         }
6822 }
6823
6824 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6825 {
6826         struct drm_plane *plane;
6827         struct drm_plane_state *old_plane_state, *new_plane_state;
6828         int i;
6829
6830         /*
6831          * TODO: Make this per-stream so we don't issue redundant updates for
6832          * commits with multiple streams.
6833          */
6834         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6835                                        new_plane_state, i)
6836                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6837                         handle_cursor_update(plane, old_plane_state);
6838 }
6839
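/*
 * Commit all plane updates for one CRTC: build a dc_surface_update bundle
 * (scaling info, plane info, flip addresses), wait on each framebuffer's
 * fences, and hand the bundle to DC, optionally synchronized to the target
 * vblank.
 */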
6840 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6841                                     struct dc_state *dc_state,
6842                                     struct drm_device *dev,
6843                                     struct amdgpu_display_manager *dm,
6844                                     struct drm_crtc *pcrtc,
6845                                     bool wait_for_vblank)
6846 {
6847         uint32_t i;
6848         uint64_t timestamp_ns;
6849         struct drm_plane *plane;
6850         struct drm_plane_state *old_plane_state, *new_plane_state;
6851         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6852         struct drm_crtc_state *new_pcrtc_state =
6853                         drm_atomic_get_new_crtc_state(state, pcrtc);
6854         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6855         struct dm_crtc_state *dm_old_crtc_state =
6856                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6857         int planes_count = 0, vpos, hpos;
6858         long r;
6859         unsigned long flags;
6860         struct amdgpu_bo *abo;
6861         uint64_t tiling_flags;
6862         bool tmz_surface = false;
6863         uint32_t target_vblank, last_flip_vblank;
6864         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6865         bool pflip_present = false;
6866         struct {
6867                 struct dc_surface_update surface_updates[MAX_SURFACES];
6868                 struct dc_plane_info plane_infos[MAX_SURFACES];
6869                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6870                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6871                 struct dc_stream_update stream_update;
6872         } *bundle;
6873
6874         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6875
6876         if (!bundle) {
6877                 dm_error("Failed to allocate update bundle\n");
6878                 goto cleanup;
6879         }
6880
6881         /*
6882          * Disable the cursor first if we're disabling all the planes.
6883          * It'll remain on the screen after the planes are re-enabled
6884          * if we don't.
6885          */
6886         if (acrtc_state->active_planes == 0)
6887                 amdgpu_dm_commit_cursors(state);
6888
6889         /* update planes when needed */
6890         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6891                 struct drm_crtc *crtc = new_plane_state->crtc;
6892                 struct drm_crtc_state *new_crtc_state;
6893                 struct drm_framebuffer *fb = new_plane_state->fb;
6894                 bool plane_needs_flip;
6895                 struct dc_plane_state *dc_plane;
6896                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6897
6898                 /* Cursor plane is handled after stream updates */
6899                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6900                         continue;
6901
6902                 if (!fb || !crtc || pcrtc != crtc)
6903                         continue;
6904
6905                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6906                 if (!new_crtc_state->active)
6907                         continue;
6908
6909                 dc_plane = dm_new_plane_state->dc_state;
6910
6911                 bundle->surface_updates[planes_count].surface = dc_plane;
6912                 if (new_pcrtc_state->color_mgmt_changed) {
6913                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6914                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6915                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6916                 }
6917
6918                 fill_dc_scaling_info(new_plane_state,
6919                                      &bundle->scaling_infos[planes_count]);
6920
6921                 bundle->surface_updates[planes_count].scaling_info =
6922                         &bundle->scaling_infos[planes_count];
6923
6924                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6925
6926                 pflip_present = pflip_present || plane_needs_flip;
6927
6928                 if (!plane_needs_flip) {
6929                         planes_count += 1;
6930                         continue;
6931                 }
6932
6933                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6934
6935                 /*
6936                  * Wait for all fences on this FB. Do limited wait to avoid
6937                  * deadlock during GPU reset when this fence will not signal
6938                  * but we hold reservation lock for the BO.
6939                  */
6940                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6941                                                         false,
6942                                                         msecs_to_jiffies(5000));
6943                 if (unlikely(r <= 0))
6944                         DRM_ERROR("Waiting for fences timed out!");
6945
6946                 /*
6947                  * TODO: This might fail and hence is better not used; wait
6948                  * explicitly on fences instead. In general, this should
6949                  * also be done for blocking commits, as per the framework
6950                  * helpers.
6951                  */
6952                 r = amdgpu_bo_reserve(abo, true);
6953                 if (unlikely(r != 0))
6954                         DRM_ERROR("failed to reserve buffer before flip\n");
6955
6956                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6957
6958                 tmz_surface = amdgpu_bo_encrypted(abo);
6959
6960                 amdgpu_bo_unreserve(abo);
6961
6962                 fill_dc_plane_info_and_addr(
6963                         dm->adev, new_plane_state, tiling_flags,
6964                         &bundle->plane_infos[planes_count],
6965                         &bundle->flip_addrs[planes_count].address,
6966                         tmz_surface,
6967                         false);
6968
6969                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6970                                  new_plane_state->plane->index,
6971                                  bundle->plane_infos[planes_count].dcc.enable);
6972
6973                 bundle->surface_updates[planes_count].plane_info =
6974                         &bundle->plane_infos[planes_count];
6975
6976                 /*
6977                  * Only allow immediate flips for fast updates that don't
6978                  * change FB pitch, DCC state, rotation or mirroring.
6979                  */
6980                 bundle->flip_addrs[planes_count].flip_immediate =
6981                         crtc->state->async_flip &&
6982                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6983
6984                 timestamp_ns = ktime_get_ns();
6985                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6986                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6987                 bundle->surface_updates[planes_count].surface = dc_plane;
6988
6989                 if (!bundle->surface_updates[planes_count].surface) {
6990                         DRM_ERROR("No surface for CRTC: id=%d\n",
6991                                         acrtc_attach->crtc_id);
6992                         continue;
6993                 }
6994
6995                 if (plane == pcrtc->primary)
6996                         update_freesync_state_on_stream(
6997                                 dm,
6998                                 acrtc_state,
6999                                 acrtc_state->stream,
7000                                 dc_plane,
7001                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7002
7003                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7004                                  __func__,
7005                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7006                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7007
7008                 planes_count += 1;
7009
7010         }
7011
7012         if (pflip_present) {
7013                 if (!vrr_active) {
7014                         /* Use old throttling in non-vrr fixed refresh rate mode
7015                          * to keep flip scheduling based on target vblank counts
7016                          * working in a backwards compatible way, e.g., for
7017                          * clients using the GLX_OML_sync_control extension or
7018                          * DRI3/Present extension with defined target_msc.
7019                          */
7020                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7021                 } else {
7023                         /* For variable refresh rate mode only:
7024                          * Get vblank of last completed flip to avoid > 1 vrr
7025                          * flips per video frame by use of throttling, but allow
7026                          * flip programming anywhere in the possibly large
7027                          * variable vrr vblank interval for fine-grained flip
7028                          * timing control and more opportunity to avoid stutter
7029                          * on late submission of flips.
7030                          */
7031                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7032                         last_flip_vblank = acrtc_attach->last_flip_vblank;
7033                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7034                 }
7035
7036                 target_vblank = last_flip_vblank + wait_for_vblank;
7037
7038                 /*
7039                  * Wait until we're out of the vertical blank period before the one
7040                  * targeted by the flip
7041                  */
7042                 while ((acrtc_attach->enabled &&
7043                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7044                                                             0, &vpos, &hpos, NULL,
7045                                                             NULL, &pcrtc->hwmode)
7046                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7047                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7048                         (int)(target_vblank -
7049                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7050                         usleep_range(1000, 1100);
7051                 }
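
                /*
                 * Note the (int)(target_vblank - counter) > 0 test above: the
                 * subtraction is done in unsigned 32-bit arithmetic and the
                 * result compared as signed, which keeps the throttle correct
                 * across counter wraparound. For example, with target_vblank
                 * == 2 and a current count of 0xfffffffe, the difference is 4
                 * as a u32, so the loop still waits the remaining 4 vblanks.
                 */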
7052
7053                 if (acrtc_attach->base.state->event) {
7054                         drm_crtc_vblank_get(pcrtc);
7055
7056                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7057
7058                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7059                         prepare_flip_isr(acrtc_attach);
7060
7061                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7062                 }
7063
7064                 if (acrtc_state->stream) {
7065                         if (acrtc_state->freesync_vrr_info_changed)
7066                                 bundle->stream_update.vrr_infopacket =
7067                                         &acrtc_state->stream->vrr_infopacket;
7068                 }
7069         }
7070
7071         /* Update the planes if changed or disable if we don't have any. */
7072         if ((planes_count || acrtc_state->active_planes == 0) &&
7073                 acrtc_state->stream) {
7074                 bundle->stream_update.stream = acrtc_state->stream;
7075                 if (new_pcrtc_state->mode_changed) {
7076                         bundle->stream_update.src = acrtc_state->stream->src;
7077                         bundle->stream_update.dst = acrtc_state->stream->dst;
7078                 }
7079
7080                 if (new_pcrtc_state->color_mgmt_changed) {
7081                         /*
7082                          * TODO: This isn't fully correct since we've actually
7083                          * already modified the stream in place.
7084                          */
7085                         bundle->stream_update.gamut_remap =
7086                                 &acrtc_state->stream->gamut_remap_matrix;
7087                         bundle->stream_update.output_csc_transform =
7088                                 &acrtc_state->stream->csc_color_matrix;
7089                         bundle->stream_update.out_transfer_func =
7090                                 acrtc_state->stream->out_transfer_func;
7091                 }
7092
7093                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7094                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7095                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7096
7097                 /*
7098                  * If FreeSync state on the stream has changed then we need to
7099                  * re-adjust the min/max bounds now that DC doesn't handle this
7100                  * as part of commit.
7101                  */
7102                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7103                     amdgpu_dm_vrr_active(acrtc_state)) {
7104                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7105                         dc_stream_adjust_vmin_vmax(
7106                                 dm->dc, acrtc_state->stream,
7107                                 &acrtc_state->vrr_params.adjust);
7108                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7109                 }
7110                 mutex_lock(&dm->dc_lock);
7111                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7112                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7113                         amdgpu_dm_psr_disable(acrtc_state->stream);
7114
7115                 dc_commit_updates_for_stream(dm->dc,
7116                                                      bundle->surface_updates,
7117                                                      planes_count,
7118                                                      acrtc_state->stream,
7119                                                      &bundle->stream_update,
7120                                                      dc_state);
7121
7122                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7123                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7124                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7125                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7126                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7127                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7128                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7129                         amdgpu_dm_psr_enable(acrtc_state->stream);
7130                 }
7131
7132                 mutex_unlock(&dm->dc_lock);
7133         }
7134
7135         /*
7136          * Update cursor state *after* programming all the planes.
7137          * This avoids redundant programming in the case where we're going
7138          * to be disabling a single plane - those pipes are being disabled.
7139          */
7140         if (acrtc_state->active_planes)
7141                 amdgpu_dm_commit_cursors(state);
7142
7143 cleanup:
7144         kfree(bundle);
7145 }
7146
7147 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7148                                    struct drm_atomic_state *state)
7149 {
7150         struct amdgpu_device *adev = dev->dev_private;
7151         struct amdgpu_dm_connector *aconnector;
7152         struct drm_connector *connector;
7153         struct drm_connector_state *old_con_state, *new_con_state;
7154         struct drm_crtc_state *new_crtc_state;
7155         struct dm_crtc_state *new_dm_crtc_state;
7156         const struct dc_stream_status *status;
7157         int i, inst;
7158
7159         /* Notify audio device removals. */
7160         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7161                 if (old_con_state->crtc != new_con_state->crtc) {
7162                         /* CRTC changes require notification. */
7163                         goto notify;
7164                 }
7165
7166                 if (!new_con_state->crtc)
7167                         continue;
7168
7169                 new_crtc_state = drm_atomic_get_new_crtc_state(
7170                         state, new_con_state->crtc);
7171
7172                 if (!new_crtc_state)
7173                         continue;
7174
7175                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7176                         continue;
7177
7178         notify:
7179                 aconnector = to_amdgpu_dm_connector(connector);
7180
7181                 mutex_lock(&adev->dm.audio_lock);
7182                 inst = aconnector->audio_inst;
7183                 aconnector->audio_inst = -1;
7184                 mutex_unlock(&adev->dm.audio_lock);
7185
7186                 amdgpu_dm_audio_eld_notify(adev, inst);
7187         }
7188
7189         /* Notify audio device additions. */
7190         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7191                 if (!new_con_state->crtc)
7192                         continue;
7193
7194                 new_crtc_state = drm_atomic_get_new_crtc_state(
7195                         state, new_con_state->crtc);
7196
7197                 if (!new_crtc_state)
7198                         continue;
7199
7200                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7201                         continue;
7202
7203                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7204                 if (!new_dm_crtc_state->stream)
7205                         continue;
7206
7207                 status = dc_stream_get_status(new_dm_crtc_state->stream);
7208                 if (!status)
7209                         continue;
7210
7211                 aconnector = to_amdgpu_dm_connector(connector);
7212
7213                 mutex_lock(&adev->dm.audio_lock);
7214                 inst = status->audio_inst;
7215                 aconnector->audio_inst = inst;
7216                 mutex_unlock(&adev->dm.audio_lock);
7217
7218                 amdgpu_dm_audio_eld_notify(adev, inst);
7219         }
7220 }
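
/*
 * In both loops above, the contract with amdgpu_dm_audio_eld_notify() is the
 * audio instance number: a removal latches the old instance, stores -1 (no
 * audio) on the connector, and notifies for the old pin so the audio
 * component can drop its ELD; an addition stores the instance reported in the
 * dc_stream_status and notifies for that pin instead.
 */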
7221
7222 /*
7223  * Enable interrupts on CRTCs that are newly active, have undergone
7224  * a modeset, or have active planes again.
7225  *
7226  * Done in two passes, based on the for_modeset flag:
7227  * Pass 1: For CRTCs going through modeset
7228  * Pass 2: For CRTCs going from 0 to n active planes
7229  *
7230  * Interrupts can only be enabled after the planes are programmed, so
7231  * this requires a two-pass approach since we don't want to just defer
7232  * the interrupts until after the planes are committed every time.
7233  */
7234 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7235                                              struct drm_atomic_state *state,
7236                                              bool for_modeset)
7237 {
7238         struct amdgpu_device *adev = dev->dev_private;
7239         struct drm_crtc *crtc;
7240         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7241         int i;
7242 #ifdef CONFIG_DEBUG_FS
7243         enum amdgpu_dm_pipe_crc_source source;
7244 #endif
7245
7246         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7247                                       new_crtc_state, i) {
7248                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7249                 struct dm_crtc_state *dm_new_crtc_state =
7250                         to_dm_crtc_state(new_crtc_state);
7251                 struct dm_crtc_state *dm_old_crtc_state =
7252                         to_dm_crtc_state(old_crtc_state);
7253                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7254                 bool run_pass;
7255
7256                 run_pass = (for_modeset && modeset) ||
7257                            (!for_modeset && !modeset &&
7258                             !dm_old_crtc_state->interrupts_enabled);
7259
7260                 if (!run_pass)
7261                         continue;
7262
7263                 if (!dm_new_crtc_state->interrupts_enabled)
7264                         continue;
7265
7266                 manage_dm_interrupts(adev, acrtc, true);
7267
7268 #ifdef CONFIG_DEBUG_FS
7269                 /* The stream has changed so CRC capture needs to be re-enabled. */
7270                 source = dm_new_crtc_state->crc_src;
7271                 if (amdgpu_dm_is_valid_crc_source(source)) {
7272                         amdgpu_dm_crtc_configure_crc_source(
7273                                 crtc, dm_new_crtc_state,
7274                                 dm_new_crtc_state->crc_src);
7275                 }
7276 #endif
7277         }
7278 }
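
/*
 * The run_pass predicate above selects two disjoint sets of CRTCs:
 *
 *	pass 1 (for_modeset == true):  CRTCs undergoing a modeset;
 *	pass 2 (for_modeset == false): CRTCs without a modeset whose
 *				       interrupts were previously disabled,
 *				       i.e. going from 0 to n active planes.
 *
 * CRTCs whose new state leaves interrupts disabled are skipped in both
 * passes.
 */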
7279
7280 /*
7281  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7282  * @crtc_state: the DRM CRTC state
7283  * @stream_state: the DC stream state.
7284  *
7285  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7286  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7287  */
7288 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7289                                                 struct dc_stream_state *stream_state)
7290 {
7291         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7292 }
7293
7294 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7295                                    struct drm_atomic_state *state,
7296                                    bool nonblock)
7297 {
7298         struct drm_crtc *crtc;
7299         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7300         struct amdgpu_device *adev = dev->dev_private;
7301         int i;
7302
7303         /*
7304          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7305          * a modeset, being disabled, or have no active planes.
7306          *
7307          * It's done in atomic commit rather than commit tail for now since
7308          * some of these interrupt handlers access the current CRTC state and
7309          * potentially the stream pointer itself.
7310          *
7311          * Since the atomic state is swapped within atomic commit and not within
7312          * commit tail, this would lead to the new state (that hasn't been
7313          * committed yet) being accessed from within the handlers.
7314          *
7315          * TODO: Fix this so we can do this in commit tail and not have to block
7316          * in atomic check.
7317          */
7318         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7319                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7320                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7321                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7322
7323                 if (dm_old_crtc_state->interrupts_enabled &&
7324                     (!dm_new_crtc_state->interrupts_enabled ||
7325                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7326                         manage_dm_interrupts(adev, acrtc, false);
7327         }
7328         /*
7329          * Add a check here for SoCs that support a hardware cursor plane, to
7330          * unset legacy_cursor_update.
7331          */
7332
7333         return drm_atomic_helper_commit(dev, state, nonblock);
7334
7335         /* TODO: Handle EINTR, re-enable IRQ */
7336 }
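
/*
 * The disable path above fires, for a CRTC whose interrupts are currently
 * enabled, when either the new state turns its interrupts off (the CRTC is
 * being disabled or loses all active planes) or the CRTC undergoes a full
 * modeset. The matching re-enable happens later, from
 * amdgpu_dm_enable_crtc_interrupts() in commit tail, once the new planes
 * have been programmed.
 */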
7337
7338 /**
7339  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
7340  * @state: The atomic state to commit
7341  *
7342  * This will tell DC to commit the constructed DC state from atomic_check,
7343  * programming the hardware. Any failures here implies a hardware failure, since
7344  * programming the hardware. Any failure here implies a hardware failure, since
7345  */
7346 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7347 {
7348         struct drm_device *dev = state->dev;
7349         struct amdgpu_device *adev = dev->dev_private;
7350         struct amdgpu_display_manager *dm = &adev->dm;
7351         struct dm_atomic_state *dm_state;
7352         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7353         uint32_t i, j;
7354         struct drm_crtc *crtc;
7355         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7356         unsigned long flags;
7357         bool wait_for_vblank = true;
7358         struct drm_connector *connector;
7359         struct drm_connector_state *old_con_state, *new_con_state;
7360         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7361         int crtc_disable_count = 0;
7362
7363         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7364
7365         dm_state = dm_atomic_get_new_state(state);
7366         if (dm_state && dm_state->context) {
7367                 dc_state = dm_state->context;
7368         } else {
7369                 /* No state changes, retain current state. */
7370                 dc_state_temp = dc_create_state(dm->dc);
7371                 ASSERT(dc_state_temp);
7372                 dc_state = dc_state_temp;
7373                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7374         }
7375
7376         /* update changed items */
7377         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7378                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7379
7380                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7381                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7382
7383                 DRM_DEBUG_DRIVER(
7384                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7385                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7386                         "connectors_changed:%d\n",
7387                         acrtc->crtc_id,
7388                         new_crtc_state->enable,
7389                         new_crtc_state->active,
7390                         new_crtc_state->planes_changed,
7391                         new_crtc_state->mode_changed,
7392                         new_crtc_state->active_changed,
7393                         new_crtc_state->connectors_changed);
7394
7395                 /* Copy all transient state flags into dc state */
7396                 if (dm_new_crtc_state->stream) {
7397                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7398                                                             dm_new_crtc_state->stream);
7399                 }
7400
7401                 /* handles headless hotplug case, updating new_state and
7402                  * aconnector as needed
7403                  */
7404
7405                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7406
7407                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7408
7409                         if (!dm_new_crtc_state->stream) {
7410                                 /*
7411                                  * This could happen because of issues with
7412                                  * userspace notification delivery: userspace
7413                                  * tries to set a mode on a display which is
7414                                  * in fact disconnected (dc_sink is NULL on
7415                                  * the aconnector in this case). We expect a
7416                                  * mode reset to come soon.
7417                                  *
7418                                  * This can also happen when an unplug occurs
7419                                  * during the resume sequence.
7420                                  *
7421                                  * In either case, we want to pretend we still
7422                                  * have a sink to keep the pipe running so that
7423                                  * hw state stays consistent with the sw state.
7424                                  */
7425                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7426                                                 __func__, acrtc->base.base.id);
7427                                 continue;
7428                         }
7429
7430                         if (dm_old_crtc_state->stream)
7431                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7432
7433                         pm_runtime_get_noresume(dev->dev);
7434
7435                         acrtc->enabled = true;
7436                         acrtc->hw_mode = new_crtc_state->mode;
7437                         crtc->hwmode = new_crtc_state->mode;
7438                 } else if (modereset_required(new_crtc_state)) {
7439                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7440                         /* i.e. reset mode */
7441                         if (dm_old_crtc_state->stream) {
7442                                 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7443                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7444
7445                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7446                         }
7447                 }
7448         } /* for_each_crtc_in_state() */
7449
7450         if (dc_state) {
7451                 dm_enable_per_frame_crtc_master_sync(dc_state);
7452                 mutex_lock(&dm->dc_lock);
7453                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7454                 mutex_unlock(&dm->dc_lock);
7455         }
7456
7457         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7458                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7459
7460                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7461
7462                 if (dm_new_crtc_state->stream != NULL) {
7463                         const struct dc_stream_status *status =
7464                                         dc_stream_get_status(dm_new_crtc_state->stream);
7465
7466                         if (!status)
7467                                 status = dc_stream_get_status_from_state(dc_state,
7468                                                                          dm_new_crtc_state->stream);
7469
7470                         if (!status)
7471                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7472                         else
7473                                 acrtc->otg_inst = status->primary_otg_inst;
7474                 }
7475         }
7476 #ifdef CONFIG_DRM_AMD_DC_HDCP
7477         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7478                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7479                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7480                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7481
7482                 new_crtc_state = NULL;
7483
7484                 if (acrtc)
7485                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7486
7487                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7488
7489                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7490                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7491                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7492                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7493                         continue;
7494                 }
7495
7496                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7497                         hdcp_update_display(
7498                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7499                                 new_con_state->hdcp_content_type,
7500                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7501                                                                                                          : false);
7502         }
7503 #endif
7504
7505         /* Handle connector state changes */
7506         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7507                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7508                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7509                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7510                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7511                 struct dc_stream_update stream_update;
7512                 struct dc_info_packet hdr_packet;
7513                 struct dc_stream_status *status = NULL;
7514                 bool abm_changed, hdr_changed, scaling_changed;
7515
7516                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7517                 memset(&stream_update, 0, sizeof(stream_update));
7518
7519                 if (acrtc) {
7520                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7521                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7522                 }
7523
7524                 /* Skip any modesets/resets */
7525                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7526                         continue;
7527
7528                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7529                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7530
7531                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7532                                                              dm_old_con_state);
7533
7534                 abm_changed = dm_new_crtc_state->abm_level !=
7535                               dm_old_crtc_state->abm_level;
7536
7537                 hdr_changed =
7538                         is_hdr_metadata_different(old_con_state, new_con_state);
7539
7540                 if (!scaling_changed && !abm_changed && !hdr_changed)
7541                         continue;
7542
7543                 stream_update.stream = dm_new_crtc_state->stream;
7544                 if (scaling_changed) {
7545                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7546                                         dm_new_con_state, dm_new_crtc_state->stream);
7547
7548                         stream_update.src = dm_new_crtc_state->stream->src;
7549                         stream_update.dst = dm_new_crtc_state->stream->dst;
7550                 }
7551
7552                 if (abm_changed) {
7553                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7554
7555                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7556                 }
7557
7558                 if (hdr_changed) {
7559                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7560                         stream_update.hdr_static_metadata = &hdr_packet;
7561                 }
7562
7563                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7564                 if (WARN_ON(!status) || WARN_ON(!status->plane_count))
7565                         continue;
7566
7567                 /*
7568                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7569                  * Here we create an empty update on each plane.
7570                  * To fix this, DC should permit updating only stream properties.
7571                  */
7572                 for (j = 0; j < status->plane_count; j++)
7573                         dummy_updates[j].surface = status->plane_states[0];
7574
7576                 mutex_lock(&dm->dc_lock);
7577                 dc_commit_updates_for_stream(dm->dc,
7578                                                      dummy_updates,
7579                                                      status->plane_count,
7580                                                      dm_new_crtc_state->stream,
7581                                                      &stream_update,
7582                                                      dc_state);
7583                 mutex_unlock(&dm->dc_lock);
7584         }
7585
7586         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7587         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7588                                       new_crtc_state, i) {
7589                 if (old_crtc_state->active && !new_crtc_state->active)
7590                         crtc_disable_count++;
7591
7592                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7593                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7594
7595                 /* Update freesync active state. */
7596                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7597
7598                 /* Handle vrr on->off / off->on transitions */
7599                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7600                                                 dm_new_crtc_state);
7601         }
7602
7603         /* Enable interrupts for CRTCs going through a modeset. */
7604         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7605
7606         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7607                 if (new_crtc_state->async_flip)
7608                         wait_for_vblank = false;
7609
7610         /* update planes when needed per crtc */
7611         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7612                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7613
7614                 if (dm_new_crtc_state->stream)
7615                         amdgpu_dm_commit_planes(state, dc_state, dev,
7616                                                 dm, crtc, wait_for_vblank);
7617         }
7618
7619         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7620         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7621
7622         /* Update audio instances for each connector. */
7623         amdgpu_dm_commit_audio(dev, state);
7624
7625         /*
7626          * Send a vblank event for every CRTC whose event was not handled in the
7627          * flip path, and mark the event consumed for drm_atomic_helper_commit_hw_done().
7628          */
7629         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7630         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7631
7632                 if (new_crtc_state->event)
7633                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7634
7635                 new_crtc_state->event = NULL;
7636         }
7637         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7638
7639         /* Signal HW programming completion */
7640         drm_atomic_helper_commit_hw_done(state);
7641
7642         if (wait_for_vblank)
7643                 drm_atomic_helper_wait_for_flip_done(dev, state);
7644
7645         drm_atomic_helper_cleanup_planes(dev, state);
7646
7647         /*
7648          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7649          * so we can put the GPU into runtime suspend if we're not driving any
7650          * displays anymore
7651          */
7652         for (i = 0; i < crtc_disable_count; i++)
7653                 pm_runtime_put_autosuspend(dev->dev);
7654         pm_runtime_mark_last_busy(dev->dev);
7655
7656         if (dc_state_temp)
7657                 dc_release_state(dc_state_temp);
7658 }
7659
7661 static int dm_force_atomic_commit(struct drm_connector *connector)
7662 {
7663         int ret = 0;
7664         struct drm_device *ddev = connector->dev;
7665         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7666         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7667         struct drm_plane *plane = disconnected_acrtc->base.primary;
7668         struct drm_connector_state *conn_state;
7669         struct drm_crtc_state *crtc_state;
7670         struct drm_plane_state *plane_state;
7671
7672         if (!state)
7673                 return -ENOMEM;
7674
7675         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7676
7677         /* Construct an atomic state to restore the previous display settings */
7678
7679         /*
7680          * Attach connectors to drm_atomic_state
7681          */
7682         conn_state = drm_atomic_get_connector_state(state, connector);
7683
7684         ret = PTR_ERR_OR_ZERO(conn_state);
7685         if (ret)
7686                 goto err;
7687
7688         /* Attach crtc to drm_atomic_state */
7689         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7690
7691         ret = PTR_ERR_OR_ZERO(crtc_state);
7692         if (ret)
7693                 goto err;
7694
7695         /* force a restore */
7696         crtc_state->mode_changed = true;
7697
7698         /* Attach plane to drm_atomic_state */
7699         plane_state = drm_atomic_get_plane_state(state, plane);
7700
7701         ret = PTR_ERR_OR_ZERO(plane_state);
7702         if (ret)
7703                 goto err;
7704
7706         /* Call commit internally with the state we just constructed */
7707         ret = drm_atomic_commit(state);
7708         if (!ret)
7709                 return 0;
7710
7711 err:
7712         DRM_ERROR("Restoring old state failed with %i\n", ret);
7713         drm_atomic_state_put(state);
7714
7715         return ret;
7716 }
7717
7718 /*
7719  * This function handles all cases when a set mode does not come upon hotplug.
7720  * This includes when a display is unplugged and then plugged back into the
7721  * same port, and when running without usermode desktop manager support.
7722  */
7723 void dm_restore_drm_connector_state(struct drm_device *dev,
7724                                     struct drm_connector *connector)
7725 {
7726         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7727         struct amdgpu_crtc *disconnected_acrtc;
7728         struct dm_crtc_state *acrtc_state;
7729
7730         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7731                 return;
7732
7733         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7734         if (!disconnected_acrtc)
7735                 return;
7736
7737         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7738         if (!acrtc_state->stream)
7739                 return;
7740
7741         /*
7742          * If the previous sink is not released and is different from the
7743          * current one, we deduce we cannot rely on a usermode call to turn
7744          * the display on, so we do it here.
7745          */
7746         if (acrtc_state->stream->sink != aconnector->dc_sink)
7747                 dm_force_atomic_commit(&aconnector->base);
7748 }
7749
7750 /*
7751  * Grabs all modesetting locks to serialize against any blocking commits,
7752  * and waits for completion of all non-blocking commits.
7753  */
7754 static int do_aquire_global_lock(struct drm_device *dev,
7755                                  struct drm_atomic_state *state)
7756 {
7757         struct drm_crtc *crtc;
7758         struct drm_crtc_commit *commit;
7759         long ret;
7760
7761         /*
7762          * Adding all modeset locks to acquire_ctx will
7763          * ensure that when the framework releases it, the
7764          * extra locks we are taking here will get released too.
7765          */
7766         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7767         if (ret)
7768                 return ret;
7769
7770         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7771                 spin_lock(&crtc->commit_lock);
7772                 commit = list_first_entry_or_null(&crtc->commit_list,
7773                                 struct drm_crtc_commit, commit_entry);
7774                 if (commit)
7775                         drm_crtc_commit_get(commit);
7776                 spin_unlock(&crtc->commit_lock);
7777
7778                 if (!commit)
7779                         continue;
7780
7781                 /*
7782                  * Make sure all pending HW programming has completed and
7783                  * all page flips are done.
7784                  */
7785                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7786
7787                 if (ret > 0)
7788                         ret = wait_for_completion_interruptible_timeout(
7789                                         &commit->flip_done, 10*HZ);
7790
7791                 if (ret == 0)
7792                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7793                                   "timed out\n", crtc->base.id, crtc->name);
7794
7795                 drm_crtc_commit_put(commit);
7796         }
7797
7798         return ret < 0 ? ret : 0;
7799 }
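
/*
 * A minimal usage sketch (the caller sits in the atomic check path, not
 * shown here): the helper expects state->acquire_ctx to be populated, and a
 * non-zero return is propagated as the check result:
 *
 *	ret = do_aquire_global_lock(dev, state);
 *	if (ret)
 *		return ret;
 */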
7800
7801 static void get_freesync_config_for_crtc(
7802         struct dm_crtc_state *new_crtc_state,
7803         struct dm_connector_state *new_con_state)
7804 {
7805         struct mod_freesync_config config = {0};
7806         struct amdgpu_dm_connector *aconnector =
7807                         to_amdgpu_dm_connector(new_con_state->base.connector);
7808         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7809         int vrefresh = drm_mode_vrefresh(mode);
7810
7811         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7812                                         vrefresh >= aconnector->min_vfreq &&
7813                                         vrefresh <= aconnector->max_vfreq;
7814
7815         if (new_crtc_state->vrr_supported) {
7816                 new_crtc_state->stream->ignore_msa_timing_param = true;
7817                 config.state = new_crtc_state->base.vrr_enabled ?
7818                                 VRR_STATE_ACTIVE_VARIABLE :
7819                                 VRR_STATE_INACTIVE;
7820                 config.min_refresh_in_uhz =
7821                                 aconnector->min_vfreq * 1000000;
7822                 config.max_refresh_in_uhz =
7823                                 aconnector->max_vfreq * 1000000;
7824                 config.vsif_supported = true;
7825                 config.btr = true;
7826         }
7827
7828         new_crtc_state->freesync_config = config;
7829 }
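
/*
 * The freesync module takes its refresh bounds in micro-Hertz, hence the
 * 1000000 scale factor above. For example, a 48-144 Hz panel yields:
 *
 *	config.min_refresh_in_uhz =  48 * 1000000 =  48000000
 *	config.max_refresh_in_uhz = 144 * 1000000 = 144000000
 */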
7830
7831 static void reset_freesync_config_for_crtc(
7832         struct dm_crtc_state *new_crtc_state)
7833 {
7834         new_crtc_state->vrr_supported = false;
7835
7836         memset(&new_crtc_state->vrr_params, 0,
7837                sizeof(new_crtc_state->vrr_params));
7838         memset(&new_crtc_state->vrr_infopacket, 0,
7839                sizeof(new_crtc_state->vrr_infopacket));
7840 }
7841
7842 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7843                                 struct drm_atomic_state *state,
7844                                 struct drm_crtc *crtc,
7845                                 struct drm_crtc_state *old_crtc_state,
7846                                 struct drm_crtc_state *new_crtc_state,
7847                                 bool enable,
7848                                 bool *lock_and_validation_needed)
7849 {
7850         struct dm_atomic_state *dm_state = NULL;
7851         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7852         struct dc_stream_state *new_stream;
7853         int ret = 0;
7854
7855         /*
7856          * TODO: Move this code, which updates the changed items, into
7857          * dm_crtc_atomic_check once we get rid of dc_validation_set.
7858          */
7859         struct amdgpu_crtc *acrtc = NULL;
7860         struct amdgpu_dm_connector *aconnector = NULL;
7861         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7862         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7863
7864         new_stream = NULL;
7865
7866         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7867         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7868         acrtc = to_amdgpu_crtc(crtc);
7869         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7870
7871         /* TODO This hack should go away */
7872         if (aconnector && enable) {
7873                 /* Make sure fake sink is created in plug-in scenario */
7874                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7875                                                             &aconnector->base);
7876                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7877                                                             &aconnector->base);
7878
7879                 if (IS_ERR(drm_new_conn_state)) {
7880                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7881                         goto fail;
7882                 }
7883
7884                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7885                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7886
7887                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7888                         goto skip_modeset;
7889
7890                 new_stream = create_validate_stream_for_sink(aconnector,
7891                                                              &new_crtc_state->mode,
7892                                                              dm_new_conn_state,
7893                                                              dm_old_crtc_state->stream);
7894
7895                 /*
7896                  * We can have no stream on ACTION_SET if a display
7897                  * was disconnected during S3. In this case it is not an
7898                  * error: the OS will be updated after detection, and
7899                  * will do the right thing on the next atomic commit.
7900                  */
7901
7902                 if (!new_stream) {
7903                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7904                                         __func__, acrtc->base.base.id);
7905                         ret = -ENOMEM;
7906                         goto fail;
7907                 }
7908
7909                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7910
7911                 ret = fill_hdr_info_packet(drm_new_conn_state,
7912                                            &new_stream->hdr_static_metadata);
7913                 if (ret)
7914                         goto fail;
7915
7916                 /*
7917                  * If we already removed the old stream from the context
7918                  * (and set the new stream to NULL) then we can't reuse
7919                  * the old stream even if the stream and scaling are unchanged.
7920                  * We'll hit the BUG_ON and black screen.
7921                  * We'll hit the BUG_ON and get a black screen.
7922                  * TODO: Refactor this function to allow this check to work
7923                  * in all conditions.
7924                  */
7925                 if (dm_new_crtc_state->stream &&
7926                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7927                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7928                         new_crtc_state->mode_changed = false;
7929                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7930                                          new_crtc_state->mode_changed);
7931                 }
7932         }
7933
7934         /* mode_changed flag may get updated above, need to check again */
7935         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7936                 goto skip_modeset;
7937
7938         DRM_DEBUG_DRIVER(
7939                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7940                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7941                 "connectors_changed:%d\n",
7942                 acrtc->crtc_id,
7943                 new_crtc_state->enable,
7944                 new_crtc_state->active,
7945                 new_crtc_state->planes_changed,
7946                 new_crtc_state->mode_changed,
7947                 new_crtc_state->active_changed,
7948                 new_crtc_state->connectors_changed);
7949
7950         /* Remove stream for any changed/disabled CRTC */
7951         if (!enable) {
7952
7953                 if (!dm_old_crtc_state->stream)
7954                         goto skip_modeset;
7955
7956                 ret = dm_atomic_get_state(state, &dm_state);
7957                 if (ret)
7958                         goto fail;
7959
7960                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7961                                 crtc->base.id);
7962
7963                 /* i.e. reset mode */
7964                 if (dc_remove_stream_from_ctx(
7965                                 dm->dc,
7966                                 dm_state->context,
7967                                 dm_old_crtc_state->stream) != DC_OK) {
7968                         ret = -EINVAL;
7969                         goto fail;
7970                 }
7971
7972                 dc_stream_release(dm_old_crtc_state->stream);
7973                 dm_new_crtc_state->stream = NULL;
7974
7975                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7976
7977                 *lock_and_validation_needed = true;
7978
7979         } else { /* Add stream for any updated/enabled CRTC */
7980                 /*
7981                  * Quick fix to prevent a NULL pointer on new_stream when
7982                  * added MST connectors are not found in the existing crtc_state
7983                  * in chained (daisy-chain) mode. TODO: dig out the root cause.
7984                  */
7985                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7986                         goto skip_modeset;
7987
7988                 if (modereset_required(new_crtc_state))
7989                         goto skip_modeset;
7990
7991                 if (modeset_required(new_crtc_state, new_stream,
7992                                      dm_old_crtc_state->stream)) {
7993
7994                         WARN_ON(dm_new_crtc_state->stream);
7995
7996                         ret = dm_atomic_get_state(state, &dm_state);
7997                         if (ret)
7998                                 goto fail;
7999
8000                         dm_new_crtc_state->stream = new_stream;
8001
8002                         dc_stream_retain(new_stream);
8003
8004                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8005                                                 crtc->base.id);
8006
8007                         if (dc_add_stream_to_ctx(
8008                                         dm->dc,
8009                                         dm_state->context,
8010                                         dm_new_crtc_state->stream) != DC_OK) {
8011                                 ret = -EINVAL;
8012                                 goto fail;
8013                         }
8014
8015                         *lock_and_validation_needed = true;
8016                 }
8017         }
8018
8019 skip_modeset:
8020         /* Release extra reference */
8021         if (new_stream)
8022                 dc_stream_release(new_stream);
8023
8024         /*
8025          * We want to do dc stream updates that do not require a
8026          * full modeset below.
8027          */
8028         if (!(enable && aconnector && new_crtc_state->enable &&
8029               new_crtc_state->active))
8030                 return 0;
8031         /*
8032          * Given above conditions, the dc state cannot be NULL because:
8033          * 1. We're in the process of enabling CRTCs (the stream has just
8034          *    been added to the dc context, or is already in it),
8035          * 2. the CRTC has a valid connector attached, and
8036          * 3. the CRTC is currently active and enabled.
8037          * => The dc stream state currently exists.
8038          */
8039         BUG_ON(dm_new_crtc_state->stream == NULL);
8040
8041         /* Scaling or underscan settings */
8042         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8043                 update_stream_scaling_settings(
8044                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8045
8046         /* ABM settings */
8047         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8048
8049         /*
8050          * Color management settings. We also update color properties
8051          * when a modeset is needed, to ensure it gets reprogrammed.
8052          */
8053         if (dm_new_crtc_state->base.color_mgmt_changed ||
8054             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8055                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8056                 if (ret)
8057                         goto fail;
8058         }
8059
8060         /* Update Freesync settings. */
8061         get_freesync_config_for_crtc(dm_new_crtc_state,
8062                                      dm_new_conn_state);
8063
8064         return ret;
8065
8066 fail:
8067         if (new_stream)
8068                 dc_stream_release(new_stream);
8069         return ret;
8070 }
8071
8072 static bool should_reset_plane(struct drm_atomic_state *state,
8073                                struct drm_plane *plane,
8074                                struct drm_plane_state *old_plane_state,
8075                                struct drm_plane_state *new_plane_state)
8076 {
8077         struct drm_plane *other;
8078         struct drm_plane_state *old_other_state, *new_other_state;
8079         struct drm_crtc_state *new_crtc_state;
8080         int i;
8081
8082         /*
8083          * TODO: Remove this hack once the checks below are sufficient
8084          * to determine when we need to reset all the planes on
8085          * the stream.
8086          */
8087         if (state->allow_modeset)
8088                 return true;
8089
8090         /* Exit early if we know that we're adding or removing the plane. */
8091         if (old_plane_state->crtc != new_plane_state->crtc)
8092                 return true;
8093
8094         /* old_crtc == new_crtc == NULL, plane not in context. */
8095         if (!new_plane_state->crtc)
8096                 return false;
8097
8098         new_crtc_state =
8099                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8100
8101         if (!new_crtc_state)
8102                 return true;
8103
8104         /* CRTC Degamma changes currently require us to recreate planes. */
8105         if (new_crtc_state->color_mgmt_changed)
8106                 return true;
8107
8108         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8109                 return true;
8110
8111         /*
8112          * If there are any new primary or overlay planes being added or
8113          * removed then the z-order can potentially change. To ensure
8114          * correct z-order and pipe acquisition the current DC architecture
8115          * requires us to remove and recreate all existing planes.
8116          *
8117          * TODO: Come up with a more elegant solution for this.
8118          */
8119         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8120                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8121                         continue;
8122
8123                 if (old_other_state->crtc != new_plane_state->crtc &&
8124                     new_other_state->crtc != new_plane_state->crtc)
8125                         continue;
8126
8127                 if (old_other_state->crtc != new_other_state->crtc)
8128                         return true;
8129
8130                 /* TODO: Remove this once we can handle fast format changes. */
8131                 if (old_other_state->fb && new_other_state->fb &&
8132                     old_other_state->fb->format != new_other_state->fb->format)
8133                         return true;
8134         }
8135
8136         return false;
8137 }
8138
8139 static int dm_update_plane_state(struct dc *dc,
8140                                  struct drm_atomic_state *state,
8141                                  struct drm_plane *plane,
8142                                  struct drm_plane_state *old_plane_state,
8143                                  struct drm_plane_state *new_plane_state,
8144                                  bool enable,
8145                                  bool *lock_and_validation_needed)
8146 {
8147
8148         struct dm_atomic_state *dm_state = NULL;
8149         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8150         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8151         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8152         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8153         struct amdgpu_crtc *new_acrtc;
8154         bool needs_reset;
8155         int ret = 0;
8156
8157
8158         new_plane_crtc = new_plane_state->crtc;
8159         old_plane_crtc = old_plane_state->crtc;
8160         dm_new_plane_state = to_dm_plane_state(new_plane_state);
8161         dm_old_plane_state = to_dm_plane_state(old_plane_state);
8162
8163         /* TODO: Implement a better atomic check for the cursor plane. */
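        /*
         * Note: the cursor plane is not backed by a DC plane state; DM
         * programs the hardware cursor directly at commit time via stream
         * cursor attributes, so only basic bounds checking is needed here.
         */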
8164         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8165                 if (!enable || !new_plane_crtc ||
8166                         drm_atomic_plane_disabling(plane->state, new_plane_state))
8167                         return 0;
8168
8169                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8170
8171                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8172                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8173                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8174                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
8175                         return -EINVAL;
8176                 }
8177
8178                 return 0;
8179         }
8180
8181         needs_reset = should_reset_plane(state, plane, old_plane_state,
8182                                          new_plane_state);
8183
8184         /* Remove any changed/removed planes */
8185         if (!enable) {
8186                 if (!needs_reset)
8187                         return 0;
8188
8189                 if (!old_plane_crtc)
8190                         return 0;
8191
8192                 old_crtc_state = drm_atomic_get_old_crtc_state(
8193                                 state, old_plane_crtc);
8194                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8195
8196                 if (!dm_old_crtc_state->stream)
8197                         return 0;
8198
8199                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8200                                 plane->base.id, old_plane_crtc->base.id);
8201
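                /*
                 * dm_atomic_get_state() pulls the DM private atomic object
                 * into this atomic state (taking its lock), which provides a
                 * mutable DC context to remove the plane from.
                 */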
8202                 ret = dm_atomic_get_state(state, &dm_state);
8203                 if (ret)
8204                         return ret;
8205
8206                 if (!dc_remove_plane_from_context(
8207                                 dc,
8208                                 dm_old_crtc_state->stream,
8209                                 dm_old_plane_state->dc_state,
8210                                 dm_state->context)) {
8211
8212                         ret = -EINVAL;
8213                         return ret;
8214                 }
8215
8216
8217                 dc_plane_state_release(dm_old_plane_state->dc_state);
8218                 dm_new_plane_state->dc_state = NULL;
8219
8220                 *lock_and_validation_needed = true;
8221
8222         } else { /* Add new planes */
8223                 struct dc_plane_state *dc_new_plane_state;
8224
8225                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8226                         return 0;
8227
8228                 if (!new_plane_crtc)
8229                         return 0;
8230
8231                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8232                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8233
8234                 if (!dm_new_crtc_state->stream)
8235                         return 0;
8236
8237                 if (!needs_reset)
8238                         return 0;
8239
8240                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8241                 if (ret)
8242                         return ret;
8243
8244                 WARN_ON(dm_new_plane_state->dc_state);
8245
8246                 dc_new_plane_state = dc_create_plane_state(dc);
8247                 if (!dc_new_plane_state)
8248                         return -ENOMEM;
8249
8250                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8251                                 plane->base.id, new_plane_crtc->base.id);
8252
8253                 ret = fill_dc_plane_attributes(
8254                         new_plane_crtc->dev->dev_private,
8255                         dc_new_plane_state,
8256                         new_plane_state,
8257                         new_crtc_state);
8258                 if (ret) {
8259                         dc_plane_state_release(dc_new_plane_state);
8260                         return ret;
8261                 }
8262
8263                 ret = dm_atomic_get_state(state, &dm_state);
8264                 if (ret) {
8265                         dc_plane_state_release(dc_new_plane_state);
8266                         return ret;
8267                 }
8268
8269                 /*
8270                  * Any atomic check errors that occur after this will
8271                  * not need a release. The plane state will be attached
8272                  * to the stream, and therefore part of the atomic
8273                  * state. It'll be released when the atomic state is
8274                  * cleaned.
8275                  */
8276                 if (!dc_add_plane_to_context(
8277                                 dc,
8278                                 dm_new_crtc_state->stream,
8279                                 dc_new_plane_state,
8280                                 dm_state->context)) {
8281
8282                         dc_plane_state_release(dc_new_plane_state);
8283                         return -EINVAL;
8284                 }
8285
8286                 dm_new_plane_state->dc_state = dc_new_plane_state;
8287
8288                 /* Tell DC to do a full surface update every time there
8289                  * is a plane change. Inefficient, but works for now.
8290                  */
8291                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8292
8293                 *lock_and_validation_needed = true;
8294         }
8295
8296
8297         return ret;
8298 }
8299
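/*
 * Determine the minimal surface-update type the commit requires by building
 * dc_surface_update/dc_stream_update bundles for each CRTC in the state and
 * asking DC to classify them as FAST, MED or FULL via
 * dc_check_update_surfaces_for_stream().
 */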
8300 static int
8301 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8302                                     struct drm_atomic_state *state,
8303                                     enum surface_update_type *out_type)
8304 {
8305         struct dc *dc = dm->dc;
8306         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8307         int i, j, num_plane, ret = 0;
8308         struct drm_plane_state *old_plane_state, *new_plane_state;
8309         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8310         struct drm_crtc *new_plane_crtc;
8311         struct drm_plane *plane;
8312
8313         struct drm_crtc *crtc;
8314         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8315         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8316         struct dc_stream_status *status = NULL;
8317         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8318         struct surface_info_bundle {
8319                 struct dc_surface_update surface_updates[MAX_SURFACES];
8320                 struct dc_plane_info plane_infos[MAX_SURFACES];
8321                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8322                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8323                 struct dc_stream_update stream_update;
8324         } *bundle;
8325
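        /*
         * The bundle holds MAX_SURFACES copies of several update structures,
         * which is too large to place on the kernel stack, so it is
         * allocated from the heap instead.
         */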
8326         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8327
8328         if (!bundle) {
8329                 DRM_ERROR("Failed to allocate update bundle\n");
8330                 /* Set type to FULL to avoid crashing in DC */
8331                 update_type = UPDATE_TYPE_FULL;
8332                 goto cleanup;
8333         }
8334
8335         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8336
8337                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8338
8339                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8340                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8341                 num_plane = 0;
8342
8343                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8344                         update_type = UPDATE_TYPE_FULL;
8345                         goto cleanup;
8346                 }
8347
8348                 if (!new_dm_crtc_state->stream)
8349                         continue;
8350
8351                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8352                         const struct amdgpu_framebuffer *amdgpu_fb =
8353                                 to_amdgpu_framebuffer(new_plane_state->fb);
8354                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8355                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8356                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8357                         uint64_t tiling_flags;
8358                         bool tmz_surface = false;
8359
8360                         new_plane_crtc = new_plane_state->crtc;
8361                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8362                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8363
8364                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8365                                 continue;
8366
8367                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8368                                 update_type = UPDATE_TYPE_FULL;
8369                                 goto cleanup;
8370                         }
8371
8372                         if (crtc != new_plane_crtc)
8373                                 continue;
8374
8375                         bundle->surface_updates[num_plane].surface =
8376                                         new_dm_plane_state->dc_state;
8377
8378                         if (new_crtc_state->mode_changed) {
8379                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8380                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8381                         }
8382
8383                         if (new_crtc_state->color_mgmt_changed) {
8384                                 bundle->surface_updates[num_plane].gamma =
8385                                                 new_dm_plane_state->dc_state->gamma_correction;
8386                                 bundle->surface_updates[num_plane].in_transfer_func =
8387                                                 new_dm_plane_state->dc_state->in_transfer_func;
8388                                 bundle->surface_updates[num_plane].gamut_remap_matrix =
8389                                                 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8390                                 bundle->stream_update.gamut_remap =
8391                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8392                                 bundle->stream_update.output_csc_transform =
8393                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8394                                 bundle->stream_update.out_transfer_func =
8395                                                 new_dm_crtc_state->stream->out_transfer_func;
8396                         }
8397
8398                         ret = fill_dc_scaling_info(new_plane_state,
8399                                                    scaling_info);
8400                         if (ret)
8401                                 goto cleanup;
8402
8403                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8404
8405                         if (amdgpu_fb) {
8406                                 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8407                                 if (ret)
8408                                         goto cleanup;
8409
8410                                 ret = fill_dc_plane_info_and_addr(
8411                                         dm->adev, new_plane_state, tiling_flags,
8412                                         plane_info,
8413                                         &flip_addr->address, tmz_surface,
8414                                         false);
8415                                 if (ret)
8416                                         goto cleanup;
8417
8418                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8419                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8420                         }
8421
8422                         num_plane++;
8423                 }
8424
8425                 if (num_plane == 0)
8426                         continue;
8427
8428                 ret = dm_atomic_get_state(state, &dm_state);
8429                 if (ret)
8430                         goto cleanup;
8431
8432                 old_dm_state = dm_atomic_get_old_state(state);
8433                 if (!old_dm_state) {
8434                         ret = -EINVAL;
8435                         goto cleanup;
8436                 }
8437
8438                 status = dc_stream_get_status_from_state(old_dm_state->context,
8439                                                          new_dm_crtc_state->stream);
8440                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8441                 /*
8442                  * TODO: DC modifies the surface during this call so we need
8443                  * to lock here - find a way to do this without locking.
8444                  */
8445                 mutex_lock(&dm->dc_lock);
8446                 update_type = dc_check_update_surfaces_for_stream(
8447                                 dc, bundle->surface_updates, num_plane,
8448                                 &bundle->stream_update, status);
8449                 mutex_unlock(&dm->dc_lock);
8450
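                /*
                 * Anything above UPDATE_TYPE_MED is treated as a full update;
                 * once that is known there is no need to scan the remaining
                 * CRTCs in the state.
                 */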
8451                 if (update_type > UPDATE_TYPE_MED) {
8452                         update_type = UPDATE_TYPE_FULL;
8453                         goto cleanup;
8454                 }
8455         }
8456
8457 cleanup:
8458         kfree(bundle);
8459
8460         *out_type = update_type;
8461         return ret;
8462 }
8463
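/*
 * A modeset on one CRTC of a DSC-capable MST link can change the compressed
 * bandwidth allocated to every stream sharing that link, so all CRTCs on the
 * same MST topology must be pulled into the atomic state for revalidation.
 */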
8464 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8465 {
8466         struct drm_connector *connector;
8467         struct drm_connector_state *conn_state;
8468         struct amdgpu_dm_connector *aconnector = NULL;
8469         int i;
8470         for_each_new_connector_in_state(state, connector, conn_state, i) {
8471                 if (conn_state->crtc != crtc)
8472                         continue;
8473
8474                 aconnector = to_amdgpu_dm_connector(connector);
8475                 if (!aconnector->port || !aconnector->mst_port)
8476                         aconnector = NULL;
8477                 else
8478                         break;
8479         }
8480
8481         if (!aconnector)
8482                 return 0;
8483
8484         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8485 }
8486
8487 /**
8488  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8489  * @dev: The DRM device
8490  * @state: The atomic state to commit
8491  *
8492  * Validate that the given atomic state is programmable by DC into hardware.
8493  * This involves constructing a &struct dc_state reflecting the new hardware
8494  * state we wish to commit, then querying DC to see if it is programmable. It's
8495  * important not to modify the existing DC state. Otherwise, atomic_check
8496  * may unexpectedly commit hardware changes.
8497  *
8498  * When validating the DC state, it's important that the right locks are
8499  * acquired. For the full update case, which removes/adds/updates streams on
8500  * one CRTC while flipping on another, acquiring the global lock guarantees
8501  * that any such full update commit will wait for completion of any
8502  * outstanding flips using DRM's synchronization events. See
8503  * dm_determine_update_type_for_commit().
8504  *
8505  * Note that DM adds the affected connectors for all CRTCs in state, when that
8506  * might not seem necessary. This is because DC stream creation requires the
8507  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8508  * be possible but non-trivial - a possible TODO item.
8509  *
8510  * Return: 0 on success, or a negative error code if validation failed.
8511  */
8512 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8513                                   struct drm_atomic_state *state)
8514 {
8515         struct amdgpu_device *adev = dev->dev_private;
8516         struct dm_atomic_state *dm_state = NULL;
8517         struct dc *dc = adev->dm.dc;
8518         struct drm_connector *connector;
8519         struct drm_connector_state *old_con_state, *new_con_state;
8520         struct drm_crtc *crtc;
8521         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8522         struct drm_plane *plane;
8523         struct drm_plane_state *old_plane_state, *new_plane_state;
8524         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8525         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8526         enum dc_status status;
8527         int ret, i;
8528
8529         /*
8530          * This bool will be set to true for any modeset/reset
8531          * or plane update that implies a non-fast surface update.
8532          */
8533         bool lock_and_validation_needed = false;
8534
8535         ret = drm_atomic_helper_check_modeset(dev, state);
8536         if (ret)
8537                 goto fail;
8538
8539         if (adev->asic_type >= CHIP_NAVI10) {
8540                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8541                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8542                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8543                                 if (ret)
8544                                         goto fail;
8545                         }
8546                 }
8547         }
8548
8549         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8550                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8551                     !new_crtc_state->color_mgmt_changed &&
8552                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8553                         continue;
8554
8555                 if (!new_crtc_state->enable)
8556                         continue;
8557
8558                 ret = drm_atomic_add_affected_connectors(state, crtc);
8559                 if (ret)
8560                         goto fail;
8561
8562                 ret = drm_atomic_add_affected_planes(state, crtc);
8563                 if (ret)
8564                         goto fail;
8565         }
8566
8567         /*
8568          * Add all primary and overlay planes on the CRTC to the state
8569          * whenever a plane is enabled to maintain correct z-ordering
8570          * and to enable fast surface updates.
8571          */
8572         drm_for_each_crtc(crtc, dev) {
8573                 bool modified = false;
8574
8575                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8576                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8577                                 continue;
8578
8579                         if (new_plane_state->crtc == crtc ||
8580                             old_plane_state->crtc == crtc) {
8581                                 modified = true;
8582                                 break;
8583                         }
8584                 }
8585
8586                 if (!modified)
8587                         continue;
8588
8589                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8590                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8591                                 continue;
8592
8593                         new_plane_state =
8594                                 drm_atomic_get_plane_state(state, plane);
8595
8596                         if (IS_ERR(new_plane_state)) {
8597                                 ret = PTR_ERR(new_plane_state);
8598                                 goto fail;
8599                         }
8600                 }
8601         }
8602
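        /*
         * The DC context is rebuilt in a fixed order below: planes are
         * removed from modified streams first, then streams requiring it are
         * disabled, streams are (re)enabled, and finally new/modified planes
         * are added back.
         */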
8603         /* Remove existing planes if they are modified */
8604         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8605                 ret = dm_update_plane_state(dc, state, plane,
8606                                             old_plane_state,
8607                                             new_plane_state,
8608                                             false,
8609                                             &lock_and_validation_needed);
8610                 if (ret)
8611                         goto fail;
8612         }
8613
8614         /* Disable all crtcs which require disable */
8615         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8616                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8617                                            old_crtc_state,
8618                                            new_crtc_state,
8619                                            false,
8620                                            &lock_and_validation_needed);
8621                 if (ret)
8622                         goto fail;
8623         }
8624
8625         /* Enable all crtcs which require enable */
8626         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8627                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8628                                            old_crtc_state,
8629                                            new_crtc_state,
8630                                            true,
8631                                            &lock_and_validation_needed);
8632                 if (ret)
8633                         goto fail;
8634         }
8635
8636         /* Add new/modified planes */
8637         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8638                 ret = dm_update_plane_state(dc, state, plane,
8639                                             old_plane_state,
8640                                             new_plane_state,
8641                                             true,
8642                                             &lock_and_validation_needed);
8643                 if (ret)
8644                         goto fail;
8645         }
8646
8647         /* Run this here since we want to validate the streams we created */
8648         ret = drm_atomic_helper_check_planes(dev, state);
8649         if (ret)
8650                 goto fail;
8651
8652         if (state->legacy_cursor_update) {
8653                 /*
8654                  * This is a fast cursor update coming from the plane update
8655                  * helper, check if it can be done asynchronously for better
8656                  * performance.
8657                  */
8658                 state->async_update =
8659                         !drm_atomic_helper_async_check(dev, state);
8660
8661                 /*
8662                  * Skip the remaining global validation if this is an async
8663                  * update. Cursor updates can be done without affecting
8664                  * state or bandwidth calcs and this avoids the performance
8665                  * penalty of locking the private state object and
8666                  * allocating a new dc_state.
8667                  */
8668                 if (state->async_update)
8669                         return 0;
8670         }
8671
8672         /* Check scaling and underscan changes */
8673         /* TODO: Scaling-change validation was removed due to the inability
8674          * to commit a new stream into the context without causing a full
8675          * reset. Need to decide how to handle this.
8676          */
8677         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8678                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8679                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8680                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8681
8682                 /* Skip any modesets/resets */
8683                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8684                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8685                         continue;
8686
8687                 /* Skip anything that is not a scaling or underscan change */
8688                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8689                         continue;
8690
8691                 overall_update_type = UPDATE_TYPE_FULL;
8692                 lock_and_validation_needed = true;
8693         }
8694
8695         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8696         if (ret)
8697                 goto fail;
8698
8699         if (overall_update_type < update_type)
8700                 overall_update_type = update_type;
8701
8702         /*
8703          * lock_and_validation_needed was the old way of determining whether
8704          * the global lock is needed. It is left in to catch broken corner cases:
8705          * lock_and_validation_needed == true implies UPDATE_TYPE_FULL or UPDATE_TYPE_MED,
8706          * lock_and_validation_needed == false implies UPDATE_TYPE_FAST.
8707          */
8708         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8709                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8710
8711         if (overall_update_type > UPDATE_TYPE_FAST) {
8712                 ret = dm_atomic_get_state(state, &dm_state);
8713                 if (ret)
8714                         goto fail;
8715
8716                 ret = do_aquire_global_lock(dev, state);
8717                 if (ret)
8718                         goto fail;
8719
8720 #if defined(CONFIG_DRM_AMD_DC_DCN)
8721                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8722                         ret = -EINVAL;
                        goto fail;
                }
8723
8724                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8725                 if (ret)
8726                         goto fail;
8727 #endif
8728
8729                 /*
8730                  * Perform validation of MST topology in the state:
8731                  * We need to perform MST atomic check before calling
8732                  * dc_validate_global_state(), or there is a chance
8733                  * to get stuck in an infinite loop and hang eventually.
8734                  */
8735                 ret = drm_dp_mst_atomic_check(state);
8736                 if (ret)
8737                         goto fail;
8738                 status = dc_validate_global_state(dc, dm_state->context, false);
8739                 if (status != DC_OK) {
8740                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
8741                                        dc_status_to_str(status), status);
8742                         ret = -EINVAL;
8743                         goto fail;
8744                 }
8745         } else {
8746                 /*
8747                  * The commit is a fast update. Fast updates shouldn't change
8748                  * the DC context, affect global validation, and can have their
8749                  * commit work done in parallel with other commits not touching
8750                  * the same resource. If we have a new DC context as part of
8751                  * the DM atomic state from validation we need to free it and
8752                  * retain the existing one instead.
8753                  */
8754                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8755
8756                 new_dm_state = dm_atomic_get_new_state(state);
8757                 old_dm_state = dm_atomic_get_old_state(state);
8758
8759                 if (new_dm_state && old_dm_state) {
8760                         if (new_dm_state->context)
8761                                 dc_release_state(new_dm_state->context);
8762
8763                         new_dm_state->context = old_dm_state->context;
8764
8765                         if (old_dm_state->context)
8766                                 dc_retain_state(old_dm_state->context);
8767                 }
8768         }
8769
8770         /* Store the overall update type for use later in atomic check. */
8771         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8772                 struct dm_crtc_state *dm_new_crtc_state =
8773                         to_dm_crtc_state(new_crtc_state);
8774
8775                 dm_new_crtc_state->update_type = (int)overall_update_type;
8776         }
8777
8778         /* Must have succeeded at this point */
8779         WARN_ON(ret);
8780         return ret;
8781
8782 fail:
8783         if (ret == -EDEADLK)
8784                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8785         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8786                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8787         else
8788                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8789
8790         return ret;
8791 }
8792
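/*
 * FreeSync over DP requires a sink that ignores the MSA timing parameters
 * (the DP_MSA_TIMING_PAR_IGNORED bit in DP_DOWN_STREAM_PORT_COUNT), since
 * variable refresh changes the vertical timing on the fly without updating
 * the MSA.
 */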
8793 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8794                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8795 {
8796         uint8_t dpcd_data;
8797         bool capable = false;
8798
8799         if (amdgpu_dm_connector->dc_link &&
8800                 dm_helpers_dp_read_dpcd(
8801                                 NULL,
8802                                 amdgpu_dm_connector->dc_link,
8803                                 DP_DOWN_STREAM_PORT_COUNT,
8804                                 &dpcd_data,
8805                                 sizeof(dpcd_data))) {
8806                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8807         }
8808
8809         return capable;
8810 }

8811 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8812                                         struct edid *edid)
8813 {
8814         int i;
8815         bool edid_check_required;
8816         struct detailed_timing *timing;
8817         struct detailed_non_pixel *data;
8818         struct detailed_data_monitor_range *range;
8819         struct amdgpu_dm_connector *amdgpu_dm_connector =
8820                         to_amdgpu_dm_connector(connector);
8821         struct dm_connector_state *dm_con_state = NULL;
8822
8823         struct drm_device *dev = connector->dev;
8824         struct amdgpu_device *adev = dev->dev_private;
8825         bool freesync_capable = false;
8826
8827         if (!connector->state) {
8828                 DRM_ERROR("%s - Connector has no state", __func__);
8829                 goto update;
8830         }
8831
8832         if (!edid) {
8833                 dm_con_state = to_dm_connector_state(connector->state);
8834
8835                 amdgpu_dm_connector->min_vfreq = 0;
8836                 amdgpu_dm_connector->max_vfreq = 0;
8837                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8838
8839                 goto update;
8840         }
8841
8842         dm_con_state = to_dm_connector_state(connector->state);
8843
8844         edid_check_required = false;
8845         if (!amdgpu_dm_connector->dc_sink) {
8846                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8847                 goto update;
8848         }
8849         if (!adev->dm.freesync_module)
8850                 goto update;
8851         /*
8852          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
8853          */
8854         if (edid) {
8855                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8856                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8857                         edid_check_required = is_dp_capable_without_timing_msa(
8858                                                 adev->dm.dc,
8859                                                 amdgpu_dm_connector);
8860                 }
8861         }
8862         if (edid_check_required && (edid->version > 1 ||
8863            (edid->version == 1 && edid->revision > 1))) {
8864                 for (i = 0; i < 4; i++) {
8865
8866                         timing  = &edid->detailed_timings[i];
8867                         data    = &timing->data.other_data;
8868                         range   = &data->data.range;
8869                         /*
8870                          * Check if monitor has continuous frequency mode
8871                          */
8872                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8873                                 continue;
8874                         /*
8875                          * Check for range-limits flags only. If flags == 1 then
8876                          * no additional timing information is provided.
8877                          * Default GTF, GTF secondary curve and CVT are not
8878                          * supported.
8879                          */
8880                         if (range->flags != 1)
8881                                 continue;
8882
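                        /*
                         * The EDID range descriptor encodes the maximum pixel
                         * clock in 10 MHz units, hence the multiplication by
                         * 10 below.
                         */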
8883                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8884                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8885                         amdgpu_dm_connector->pixel_clock_mhz =
8886                                 range->pixel_clock_mhz * 10;
8887                         break;
8888                 }
8889
8890                 if (amdgpu_dm_connector->max_vfreq -
8891                     amdgpu_dm_connector->min_vfreq > 10)
8892                         freesync_capable = true;
8895         }
8896
8897 update:
8898         if (dm_con_state)
8899                 dm_con_state->freesync_capable = freesync_capable;
8900
8901         if (connector->vrr_capable_property)
8902                 drm_connector_set_vrr_capable_property(connector,
8903                                                        freesync_capable);
8904 }
8905
8906 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8907 {
8908         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8909
8910         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8911                 return;
8912         if (link->type == dc_connection_none)
8913                 return;
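        /*
         * DP_PSR_SUPPORT (DPCD register 0x070) reports the PSR version the
         * sink supports; zero means PSR is unsupported.
         */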
8914         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8915                                         dpcd_data, sizeof(dpcd_data))) {
8916                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8917
8918                 if (dpcd_data[0] == 0) {
8919                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8920                         link->psr_settings.psr_feature_enabled = false;
8921                 } else {
8922                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8923                         link->psr_settings.psr_feature_enabled = true;
8924                 }
8925
8926                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8927         }
8928 }
8929
8930 /*
8931  * amdgpu_dm_link_setup_psr() - configure psr link
8932  * @stream: stream state
8933  *
8934  * Return: true if success
8935  * Return: true on success, false otherwise
8936 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8937 {
8938         struct dc_link *link = NULL;
8939         struct psr_config psr_config = {0};
8940         struct psr_context psr_context = {0};
8941         bool ret = false;
8942
8943         if (stream == NULL)
8944                 return false;
8945
8946         link = stream->link;
8947
8948         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8949
8950         if (psr_config.psr_version > 0) {
8951                 psr_config.psr_exit_link_training_required = 0x1;
8952                 psr_config.psr_frame_capture_indication_req = 0;
8953                 psr_config.psr_rfb_setup_time = 0x37;
8954                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8955                 psr_config.allow_smu_optimizations = 0x0;
8956
8957                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8958
8959         }
8960         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8961
8962         return ret;
8963 }
8964
8965 /*
8966  * amdgpu_dm_psr_enable() - enable psr f/w
8967  * @stream: stream state
8968  *
8969  * Return: true on success, false otherwise
8970  */
8971 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8972 {
8973         struct dc_link *link = stream->link;
8974         unsigned int vsync_rate_hz = 0;
8975         struct dc_static_screen_params params = {0};
8976         /*
8977          * Calculate the number of static frames before generating an
8978          * interrupt to enter PSR; init to a fail-safe of 2 static frames.
8979          */
8980         unsigned int num_frames_static = 2;
8981
8982         DRM_DEBUG_DRIVER("Enabling psr...\n");
8983
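        /*
         * Refresh rate = pixel clock / (h_total * v_total). For example, a
         * CEA 1080p timing with a 148.5 MHz pixel clock and 2200x1125 total
         * pixels yields 148500000 / (2200 * 1125) = 60 Hz.
         */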
8984         vsync_rate_hz = div64_u64(div64_u64((
8985                         stream->timing.pix_clk_100hz * 100),
8986                         stream->timing.v_total),
8987                         stream->timing.h_total);
8988
8989         /*
8990          * Round up: calculate the number of frames such that at least
8991          * 30 ms of time has passed.
8992          */
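        /*
         * E.g. at 60 Hz the frame time is 16666 us, so the result is
         * 30000 / 16666 + 1 = 2 static frames before PSR entry.
         */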
8993         if (vsync_rate_hz != 0) {
8994                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8995                 num_frames_static = (30000 / frame_time_microsec) + 1;
8996         }
8997
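        /*
         * These triggers tell the firmware which events count as screen
         * activity: any cursor, overlay or surface update resets the static
         * frame counter before PSR entry.
         */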
8998         params.triggers.cursor_update = true;
8999         params.triggers.overlay_update = true;
9000         params.triggers.surface_update = true;
9001         params.num_frames = num_frames_static;
9002
9003         dc_stream_set_static_screen_params(link->ctx->dc,
9004                                            &stream, 1,
9005                                            &params);
9006
9007         return dc_link_set_psr_allow_active(link, true, false);
9008 }
9009
9010 /*
9011  * amdgpu_dm_psr_disable() - disable psr f/w
9012  * @stream:  stream state
9013  *
9014  * Return: true on success, false otherwise
9015  */
9016 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9017 {
9018
9019         DRM_DEBUG_DRIVER("Disabling psr...\n");
9020
9021         return dc_link_set_psr_allow_active(stream->link, false, true);
9022 }