Git repository: linux.git — drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Commit: drm/amdgpu: Fix connector atomic_check compilation fail
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32
33 #include "vid.h"
34 #include "amdgpu.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_ucode.h"
37 #include "atom.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_pm.h"
40
41 #include "amd_shared.h"
42 #include "amdgpu_dm_irq.h"
43 #include "dm_helpers.h"
44 #include "amdgpu_dm_mst_types.h"
45 #if defined(CONFIG_DEBUG_FS)
46 #include "amdgpu_dm_debugfs.h"
47 #endif
48
49 #include "ivsrcid/ivsrcid_vislands30.h"
50
51 #include <linux/module.h>
52 #include <linux/moduleparam.h>
53 #include <linux/version.h>
54 #include <linux/types.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/pci.h>
57 #include <linux/firmware.h>
58
59 #include <drm/drm_atomic.h>
60 #include <drm/drm_atomic_uapi.h>
61 #include <drm/drm_atomic_helper.h>
62 #include <drm/drm_dp_mst_helper.h>
63 #include <drm/drm_fb_helper.h>
64 #include <drm/drm_fourcc.h>
65 #include <drm/drm_edid.h>
66 #include <drm/drm_vblank.h>
67
68 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
69 #include "ivsrcid/irqsrcs_dcn_1_0.h"
70
71 #include "dcn/dcn_1_0_offset.h"
72 #include "dcn/dcn_1_0_sh_mask.h"
73 #include "soc15_hw_ip.h"
74 #include "vega10_ip_offset.h"
75
76 #include "soc15_common.h"
77 #endif
78
79 #include "modules/inc/mod_freesync.h"
80 #include "modules/power/power_helpers.h"
81 #include "modules/inc/mod_info_packet.h"
82
83 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
84 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
85
86 /**
87  * DOC: overview
88  *
89  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
91  * requests into DC requests, and DC responses into DRM responses.
92  *
93  * The root control structure is &struct amdgpu_display_manager.
94  */
95
96 /* basic init/fini API */
97 static int amdgpu_dm_init(struct amdgpu_device *adev);
98 static void amdgpu_dm_fini(struct amdgpu_device *adev);
99
100 /*
101  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
103  * drm_encoder, drm_mode_config
104  *
105  * Returns 0 on success
106  */
107 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
108 /* removes and deallocates the drm structures, created by the above function */
109 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
110
111 static void
112 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
113
114 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
115                                 struct drm_plane *plane,
116                                 unsigned long possible_crtcs,
117                                 const struct dc_plane_cap *plane_cap);
118 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
119                                struct drm_plane *plane,
120                                uint32_t link_index);
121 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
122                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
123                                     uint32_t link_index,
124                                     struct amdgpu_encoder *amdgpu_encoder);
125 static int amdgpu_dm_encoder_init(struct drm_device *dev,
126                                   struct amdgpu_encoder *aencoder,
127                                   uint32_t link_index);
128
129 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
130
131 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
132                                    struct drm_atomic_state *state,
133                                    bool nonblock);
134
135 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
136
137 static int amdgpu_dm_atomic_check(struct drm_device *dev,
138                                   struct drm_atomic_state *state);
139
140 static void handle_cursor_update(struct drm_plane *plane,
141                                  struct drm_plane_state *old_plane_state);
142
143 /*
144  * dm_vblank_get_counter
145  *
146  * @brief
147  * Get counter for number of vertical blanks
148  *
149  * @param
150  * struct amdgpu_device *adev - [in] desired amdgpu device
151  * int disp_idx - [in] which CRTC to get the counter from
152  *
153  * @return
154  * Counter for vertical blanks
155  */
156 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
157 {
158         if (crtc >= adev->mode_info.num_crtc)
159                 return 0;
160         else {
161                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
162                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
163                                 acrtc->base.state);
164
165
166                 if (acrtc_state->stream == NULL) {
167                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
168                                   crtc);
169                         return 0;
170                 }
171
172                 return dc_stream_get_vblank_counter(acrtc_state->stream);
173         }
174 }
175
176 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
177                                   u32 *vbl, u32 *position)
178 {
179         uint32_t v_blank_start, v_blank_end, h_position, v_position;
180
181         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
182                 return -EINVAL;
183         else {
184                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
185                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
186                                                 acrtc->base.state);
187
188                 if (acrtc_state->stream ==  NULL) {
189                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
190                                   crtc);
191                         return 0;
192                 }
193
194                 /*
195                  * TODO rework base driver to use values directly.
196                  * for now parse it back into reg-format
197                  */
198                 dc_stream_get_scanoutpos(acrtc_state->stream,
199                                          &v_blank_start,
200                                          &v_blank_end,
201                                          &h_position,
202                                          &v_position);
203
204                 *position = v_position | (h_position << 16);
205                 *vbl = v_blank_start | (v_blank_end << 16);
206         }
207
208         return 0;
209 }
210
/* amd_ip_funcs.is_idle hook: DM has no idle state to report yet. */
static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}
216
/* amd_ip_funcs.wait_for_idle hook: nothing to wait for; always succeeds. */
static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}
222
/* amd_ip_funcs.check_soft_reset hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
        return false;
}
227
/* amd_ip_funcs.soft_reset hook: no-op; DM has no soft-reset procedure. */
static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}
233
234 static struct amdgpu_crtc *
235 get_crtc_by_otg_inst(struct amdgpu_device *adev,
236                      int otg_inst)
237 {
238         struct drm_device *dev = adev->ddev;
239         struct drm_crtc *crtc;
240         struct amdgpu_crtc *amdgpu_crtc;
241
242         if (otg_inst == -1) {
243                 WARN_ON(1);
244                 return adev->mode_info.crtcs[0];
245         }
246
247         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
248                 amdgpu_crtc = to_amdgpu_crtc(crtc);
249
250                 if (amdgpu_crtc->otg_inst == otg_inst)
251                         return amdgpu_crtc;
252         }
253
254         return NULL;
255 }
256
257 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
258 {
259         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
260                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
261 }
262
/*
 * dm_pflip_high_irq() - Pageflip-completion interrupt handler.
 * @interrupt_params: &struct common_irq_params with the adev and irq source.
 *
 * Marks the flip done and delivers the pending pageflip event.  Outside
 * VRR front-porch the event is sent immediately with an accurate vblank
 * count; inside VRR front-porch it is queued so drm_crtc_handle_vblank()
 * (run later from the vupdate irq) sends it with a valid timestamp.
 * Runs in interrupt context; all event handling is under event_lock.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        /* Spurious irq, or flip already handled elsewhere: just bail. */
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        /* A submitted flip with no event is unexpected but tolerated. */
        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
                                                        amdgpu_crtc->crtc_id);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}
362
/*
 * dm_vupdate_high_irq() - VUPDATE interrupt handler.
 * @interrupt_params: &struct common_irq_params with the adev and irq source.
 *
 * In VRR mode, performs core vblank handling here (after the end of the
 * front-porch) so vblank timestamps are valid, and runs below-the-range
 * (BTR) freesync processing for pre-DCE12 ASICs.  In non-VRR mode the
 * equivalent work happens in dm_crtc_high_irq() instead.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}
406
/*
 * dm_crtc_high_irq() - VBLANK (start of vblank) interrupt handler.
 * @interrupt_params: &struct common_irq_params with the adev and irq source.
 *
 * Performs core vblank handling in non-VRR mode (timestamps are only valid
 * here when not in VRR), handles CRC capture, and runs BTR freesync
 * processing for DCE12+/AI-family ASICs when VRR is active-variable.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling at start of front-porch is only possible
                 * in non-vrr mode, as only there vblank timestamping will give
                 * valid results while done in front-porch. Otherwise defer it
                 * to dm_vupdate_high_irq after end of front-porch.
                 */
                if (!amdgpu_dm_vrr_active(acrtc_state))
                        drm_crtc_handle_vblank(&acrtc->base);

                /* Following stuff must happen at start of vblank, for crc
                 * computation and below-the-range btr support in vrr mode.
                 */
                amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

                if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
                    acrtc_state->vrr_params.supported &&
                    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                        spin_lock_irqsave(&adev->ddev->event_lock, flags);
                        mod_freesync_handle_v_update(
                                adev->dm.freesync_module,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params);

                        dc_stream_adjust_vmin_vmax(
                                adev->dm.dc,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                }
        }
}
453
/* amd_ip_funcs hook: DM does not manage clockgating; accept and ignore. */
static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}
459
/* amd_ip_funcs hook: DM does not manage powergating; accept and ignore. */
static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}
465
466 /* Prototypes of private functions */
467 static int dm_early_init(void* handle);
468
469 /* Allocate memory for FBC compressed data  */
470 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
471 {
472         struct drm_device *dev = connector->dev;
473         struct amdgpu_device *adev = dev->dev_private;
474         struct dm_comressor_info *compressor = &adev->dm.compressor;
475         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
476         struct drm_display_mode *mode;
477         unsigned long max_size = 0;
478
479         if (adev->dm.dc->fbc_compressor == NULL)
480                 return;
481
482         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
483                 return;
484
485         if (compressor->bo_ptr)
486                 return;
487
488
489         list_for_each_entry(mode, &connector->modes, head) {
490                 if (max_size < mode->htotal * mode->vtotal)
491                         max_size = mode->htotal * mode->vtotal;
492         }
493
494         if (max_size) {
495                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
496                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
497                             &compressor->gpu_addr, &compressor->cpu_addr);
498
499                 if (r)
500                         DRM_ERROR("DM: Failed to initialize FBC\n");
501                 else {
502                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
503                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
504                 }
505
506         }
507
508 }
509
/**
 * amdgpu_dm_init() - Create and initialize the display manager.
 * @adev: amdgpu device
 *
 * Brings up DM bottom-to-top: IRQ handling, the CGS device, the DC core,
 * the freesync and color-management modules, then the DRM-side display
 * structures and vblank support.  On any failure, partially-initialized
 * state is unwound through amdgpu_dm_fini() at the error label.
 *
 * Return: 0 on success, -EINVAL on any initialization failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));

        mutex_init(&adev->dm.dc_lock);

        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        /* Describe the ASIC to DC so it can pick the right behavior. */
        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->rev_id;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /*
         * TODO debug why this doesn't work on Raven
         */
        if (adev->flags & AMD_IS_APU &&
            adev->asic_type >= CHIP_CARRIZO &&
            adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        init_data.flags.power_down_display_on_boot = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        /* Freesync failure is non-fatal: DM runs without VRR support. */
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

#if defined(CONFIG_DEBUG_FS)
        if (dtn_debugfs_init(adev))
                DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}
616
617 static void amdgpu_dm_fini(struct amdgpu_device *adev)
618 {
619         amdgpu_dm_destroy_drm_device(&adev->dm);
620         /*
621          * TODO: pageflip, vlank interrupt
622          *
623          * amdgpu_dm_irq_fini(adev);
624          */
625
626         if (adev->dm.cgs_device) {
627                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
628                 adev->dm.cgs_device = NULL;
629         }
630         if (adev->dm.freesync_module) {
631                 mod_freesync_destroy(adev->dm.freesync_module);
632                 adev->dm.freesync_module = NULL;
633         }
634         /* DC Destroy TODO: Replace destroy DAL */
635         if (adev->dm.dc)
636                 dc_destroy(&adev->dm.dc);
637
638         mutex_destroy(&adev->dm.dc_lock);
639
640         return;
641 }
642
/**
 * load_dmcu_fw() - Request and register the DMCU firmware, if any.
 * @adev: amdgpu device
 *
 * Only Raven has DMCU firmware; all other supported ASICs return 0
 * immediately.  The firmware is also skipped (successfully) when the
 * loader is not PSP or when the blob is simply absent, since DMCU is
 * optional.  On success the ERAM and INTV sections are registered with
 * the ucode loader for PSP to upload.
 *
 * Return: 0 on success or benign skip, negative errno on request or
 * validation failure, -EINVAL for an unknown ASIC type.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch(adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                return 0;
        case CHIP_RAVEN:
                fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        /* Register the ERAM (code) and INTV (interrupt vector) sections
         * with the ucode loader; fw_size is padded to page granularity.
         */
        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}
719
/* amd_ip_funcs.sw_init hook: the only SW-init work is fetching DMCU fw. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	return load_dmcu_fw(adev);
}
726
727 static int dm_sw_fini(void *handle)
728 {
729         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
730
731         if(adev->dm.fw_dmcu) {
732                 release_firmware(adev->dm.fw_dmcu);
733                 adev->dm.fw_dmcu = NULL;
734         }
735
736         return 0;
737 }
738
739 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
740 {
741         struct amdgpu_dm_connector *aconnector;
742         struct drm_connector *connector;
743         int ret = 0;
744
745         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
746
747         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
748                 aconnector = to_amdgpu_dm_connector(connector);
749                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
750                     aconnector->mst_mgr.aux) {
751                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
752                                         aconnector, aconnector->base.base.id);
753
754                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
755                         if (ret < 0) {
756                                 DRM_ERROR("DM_MST: Failed to start MST\n");
757                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
758                                 return ret;
759                                 }
760                         }
761         }
762
763         drm_modeset_unlock(&dev->mode_config.connection_mutex);
764         return ret;
765 }
766
/**
 * dm_late_init() - Late IP init: program DMCU IRAM and start MST links.
 * @handle: amdgpu device cast to void*.
 *
 * Builds a linear 16-entry backlight LUT, loads it plus ramping
 * parameters into the DMCU IRAM, then kicks off MST topology detection
 * for all connectors.
 *
 * NOTE(review): @dmcu comes straight from res_pool->dmcu — if a DC
 * resource pool can lack a DMCU this may be NULL here; confirm
 * dmcu_load_iram() tolerates a NULL dmcu.
 * NOTE(review): only some fields of @params are assigned (set,
 * ramping_start, ramping_reduction, lut array/size); any other fields
 * in struct dmcu_iram_parameters are left uninitialized — verify that
 * is intended.
 *
 * Return: -EINVAL when IRAM load fails, otherwise the result of
 * detect_mst_link_for_all_connectors().
 */
static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        bool ret;

        /* Identity (linear) backlight transfer curve, 0..0xFFFF. */
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}
793
794 static void s3_handle_mst(struct drm_device *dev, bool suspend)
795 {
796         struct amdgpu_dm_connector *aconnector;
797         struct drm_connector *connector;
798         struct drm_dp_mst_topology_mgr *mgr;
799         int ret;
800         bool need_hotplug = false;
801
802         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
803
804         list_for_each_entry(connector, &dev->mode_config.connector_list,
805                             head) {
806                 aconnector = to_amdgpu_dm_connector(connector);
807                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
808                     aconnector->mst_port)
809                         continue;
810
811                 mgr = &aconnector->mst_mgr;
812
813                 if (suspend) {
814                         drm_dp_mst_topology_mgr_suspend(mgr);
815                 } else {
816                         ret = drm_dp_mst_topology_mgr_resume(mgr);
817                         if (ret < 0) {
818                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
819                                 need_hotplug = true;
820                         }
821                 }
822         }
823
824         drm_modeset_unlock(&dev->mode_config.connection_mutex);
825
826         if (need_hotplug)
827                 drm_kms_helper_hotplug_event(dev);
828 }
829
830 /**
831  * dm_hw_init() - Initialize DC device
832  * @handle: The base driver device containing the amdpgu_dm device.
833  *
834  * Initialize the &struct amdgpu_display_manager device. This involves calling
835  * the initializers of each DM component, then populating the struct with them.
836  *
837  * Although the function implies hardware initialization, both hardware and
838  * software are initialized here. Splitting them out to their relevant init
839  * hooks is a future TODO item.
840  *
841  * Some notable things that are initialized here:
842  *
843  * - Display Core, both software and hardware
844  * - DC modules that we need (freesync and color management)
845  * - DRM software states
846  * - Interrupt sources and handlers
847  * - Vblank support
848  * - Debug FS entries, if enabled
849  */
850 static int dm_hw_init(void *handle)
851 {
852         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
853         /* Create DAL display manager */
854         amdgpu_dm_init(adev);
855         amdgpu_dm_hpd_init(adev);
856
857         return 0;
858 }
859
860 /**
861  * dm_hw_fini() - Teardown DC device
862  * @handle: The base driver device containing the amdpgu_dm device.
863  *
864  * Teardown components within &struct amdgpu_display_manager that require
865  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
866  * were loaded. Also flush IRQ workqueues and disable them.
867  */
868 static int dm_hw_fini(void *handle)
869 {
870         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
871
872         amdgpu_dm_hpd_fini(adev);
873
874         amdgpu_dm_irq_fini(adev);
875         amdgpu_dm_fini(adev);
876         return 0;
877 }
878
879 static int dm_suspend(void *handle)
880 {
881         struct amdgpu_device *adev = handle;
882         struct amdgpu_display_manager *dm = &adev->dm;
883         int ret = 0;
884
885         WARN_ON(adev->dm.cached_state);
886         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
887
888         s3_handle_mst(adev->ddev, true);
889
890         amdgpu_dm_irq_suspend(adev);
891
892
893         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
894
895         return ret;
896 }
897
898 static struct amdgpu_dm_connector *
899 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
900                                              struct drm_crtc *crtc)
901 {
902         uint32_t i;
903         struct drm_connector_state *new_con_state;
904         struct drm_connector *connector;
905         struct drm_crtc *crtc_from_state;
906
907         for_each_new_connector_in_state(state, connector, new_con_state, i) {
908                 crtc_from_state = new_con_state->crtc;
909
910                 if (crtc_from_state == crtc)
911                         return to_amdgpu_dm_connector(connector);
912         }
913
914         return NULL;
915 }
916
917 static void emulated_link_detect(struct dc_link *link)
918 {
919         struct dc_sink_init_data sink_init_data = { 0 };
920         struct display_sink_capability sink_caps = { 0 };
921         enum dc_edid_status edid_status;
922         struct dc_context *dc_ctx = link->ctx;
923         struct dc_sink *sink = NULL;
924         struct dc_sink *prev_sink = NULL;
925
926         link->type = dc_connection_none;
927         prev_sink = link->local_sink;
928
929         if (prev_sink != NULL)
930                 dc_sink_retain(prev_sink);
931
932         switch (link->connector_signal) {
933         case SIGNAL_TYPE_HDMI_TYPE_A: {
934                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
935                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
936                 break;
937         }
938
939         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
940                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
941                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
942                 break;
943         }
944
945         case SIGNAL_TYPE_DVI_DUAL_LINK: {
946                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
947                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
948                 break;
949         }
950
951         case SIGNAL_TYPE_LVDS: {
952                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
953                 sink_caps.signal = SIGNAL_TYPE_LVDS;
954                 break;
955         }
956
957         case SIGNAL_TYPE_EDP: {
958                 sink_caps.transaction_type =
959                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
960                 sink_caps.signal = SIGNAL_TYPE_EDP;
961                 break;
962         }
963
964         case SIGNAL_TYPE_DISPLAY_PORT: {
965                 sink_caps.transaction_type =
966                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
967                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
968                 break;
969         }
970
971         default:
972                 DC_ERROR("Invalid connector type! signal:%d\n",
973                         link->connector_signal);
974                 return;
975         }
976
977         sink_init_data.link = link;
978         sink_init_data.sink_signal = sink_caps.signal;
979
980         sink = dc_sink_create(&sink_init_data);
981         if (!sink) {
982                 DC_ERROR("Failed to create sink!\n");
983                 return;
984         }
985
986         /* dc_sink_create returns a new reference */
987         link->local_sink = sink;
988
989         edid_status = dm_helpers_read_local_edid(
990                         link->ctx,
991                         link,
992                         sink);
993
994         if (edid_status != EDID_OK)
995                 DC_ERROR("Failed to read EDID");
996
997 }
998
/*
 * S3 resume hook: rebuild the DC state, power DC back up, re-enable MST and
 * IRQs, re-detect every non-MST connector, then replay the atomic state that
 * dm_suspend() cached. The ordering of these steps is load-bearing.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* NOTE(review): dc_create_state() can return NULL on allocation
	 * failure and the result is used unchecked below — confirm. */
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection*/
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector that reads disconnected: emulate a sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop the stale sink ref; detection below sets a fresh one. */
		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	/* Replay the display configuration saved by dm_suspend(). */
	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}
1102
1103 /**
1104  * DOC: DM Lifecycle
1105  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1107  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1108  * the base driver's device list to be initialized and torn down accordingly.
1109  *
1110  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1111  */
1112
/* IP-block hooks the amdgpu base driver invokes on the DM device. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1130
/* Registration record exposing DM as the DCE IP block (version 1.0.0). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1139
1140
1141 /**
1142  * DOC: atomic
1143  *
1144  * *WIP*
1145  */
1146
/* DRM mode-config entry points; atomic check/commit are DM-specific. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
1153
/* Mode-config helper vtable: DM supplies its own atomic commit tail. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
1157
/*
 * Synchronize connector state with the sink currently present on
 * @aconnector's DC link: swap dc_sink references, refresh the cached EDID,
 * the DRM EDID property, CEC and freesync caps. MST sinks are owned by the
 * DRM MST framework and are skipped. Every early-return path below must
 * balance the dc_sink_retain() taken on @sink at the top.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			/* Headless: fall back to the emulated sink. */
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink has no EDID: clear cache and CEC. */
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		/* Disconnect: drop EDID, freesync caps and the sink ref. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
1288
/*
 * Long-pulse HPD handler (connect/disconnect). Re-runs link detection under
 * hpd_lock, updates connector state on a real status change, restores the DRM
 * state and sends a hotplug event to userspace.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/* Forced connector that reads disconnected: emulate a sink. */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only notify userspace when the connector is not forced. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
1333
/*
 * Service DP short-pulse IRQ vectors for an MST connector: read the sink's
 * ESI (or legacy sink-count/IRQ) registers over DPCD, forward them to the
 * DRM MST layer, ACK any handled bits back to the sink, and poll again until
 * no IRQ remains pending (bounded by max_process_count).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-DP1.2 sinks expose the IRQ vector at the legacy address. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* AUX writes can fail transiently; retry a few times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1411
/*
 * Short-pulse HPD (hpd_rx) handler. Lets DC process link-loss/downstream
 * status first; if the downstream port status changed on a non-MST root
 * connector, re-runs detection and notifies userspace. Pending MST/ESI IRQ
 * vectors are then drained via dm_handle_hpd_rx_irq(). hpd_lock is only
 * taken for non-MST links — MST handles its own serialization.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector that reads disconnected: emulate a sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* Drain MST/ESI IRQ vectors on trained DP links or MST branches. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
1473
1474 static void register_hpd_handlers(struct amdgpu_device *adev)
1475 {
1476         struct drm_device *dev = adev->ddev;
1477         struct drm_connector *connector;
1478         struct amdgpu_dm_connector *aconnector;
1479         const struct dc_link *dc_link;
1480         struct dc_interrupt_params int_params = {0};
1481
1482         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1483         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1484
1485         list_for_each_entry(connector,
1486                         &dev->mode_config.connector_list, head) {
1487
1488                 aconnector = to_amdgpu_dm_connector(connector);
1489                 dc_link = aconnector->dc_link;
1490
1491                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1492                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1493                         int_params.irq_source = dc_link->irq_source_hpd;
1494
1495                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1496                                         handle_hpd_irq,
1497                                         (void *) aconnector);
1498                 }
1499
1500                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1501
1502                         /* Also register for DP short pulse (hpd_rx). */
1503                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1504                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1505
1506                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1507                                         handle_hpd_rx_irq,
1508                                         (void *) aconnector);
1509                 }
1510         }
1511 }
1512
/*
 * Register IRQ sources and initialize IRQ callbacks for DCE hardware:
 * VBLANK, VUPDATE and page-flip per display, plus the shared HPD source.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* SOC15 ASICs route display interrupts through the DCE IH client. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1620
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN 1.0 ASICs.
 *
 * For each CRTC (adev->mode_info.num_crtc) this wires up three interrupt
 * sources -- VSTARTUP (used as the vblank event), VUPDATE_NO_LOCK and
 * GRPH_PFLIP -- plus one HPD source, each routed through
 * amdgpu_dm_irq_handler() to the matching dm_*_high_irq() callback.
 *
 * Returns 0 on success or the negative error from amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC parameter slot, indexed by offset from VBLANK1 */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC parameter slot, indexed by offset from VUPDATE1 */
		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-pipe parameter slot, indexed by offset from PFLIP_FIRST */
		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1735
1736 /*
1737  * Acquires the lock for the atomic state object and returns
1738  * the new atomic state.
1739  *
1740  * This should only be called during atomic check.
1741  */
1742 static int dm_atomic_get_state(struct drm_atomic_state *state,
1743                                struct dm_atomic_state **dm_state)
1744 {
1745         struct drm_device *dev = state->dev;
1746         struct amdgpu_device *adev = dev->dev_private;
1747         struct amdgpu_display_manager *dm = &adev->dm;
1748         struct drm_private_state *priv_state;
1749
1750         if (*dm_state)
1751                 return 0;
1752
1753         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1754         if (IS_ERR(priv_state))
1755                 return PTR_ERR(priv_state);
1756
1757         *dm_state = to_dm_atomic_state(priv_state);
1758
1759         return 0;
1760 }
1761
1762 struct dm_atomic_state *
1763 dm_atomic_get_new_state(struct drm_atomic_state *state)
1764 {
1765         struct drm_device *dev = state->dev;
1766         struct amdgpu_device *adev = dev->dev_private;
1767         struct amdgpu_display_manager *dm = &adev->dm;
1768         struct drm_private_obj *obj;
1769         struct drm_private_state *new_obj_state;
1770         int i;
1771
1772         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1773                 if (obj->funcs == dm->atomic_obj.funcs)
1774                         return to_dm_atomic_state(new_obj_state);
1775         }
1776
1777         return NULL;
1778 }
1779
1780 struct dm_atomic_state *
1781 dm_atomic_get_old_state(struct drm_atomic_state *state)
1782 {
1783         struct drm_device *dev = state->dev;
1784         struct amdgpu_device *adev = dev->dev_private;
1785         struct amdgpu_display_manager *dm = &adev->dm;
1786         struct drm_private_obj *obj;
1787         struct drm_private_state *old_obj_state;
1788         int i;
1789
1790         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1791                 if (obj->funcs == dm->atomic_obj.funcs)
1792                         return to_dm_atomic_state(old_obj_state);
1793         }
1794
1795         return NULL;
1796 }
1797
1798 static struct drm_private_state *
1799 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1800 {
1801         struct dm_atomic_state *old_state, *new_state;
1802
1803         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1804         if (!new_state)
1805                 return NULL;
1806
1807         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1808
1809         old_state = to_dm_atomic_state(obj->state);
1810
1811         if (old_state && old_state->context)
1812                 new_state->context = dc_copy_state(old_state->context);
1813
1814         if (!new_state->context) {
1815                 kfree(new_state);
1816                 return NULL;
1817         }
1818
1819         return &new_state->base;
1820 }
1821
1822 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1823                                     struct drm_private_state *state)
1824 {
1825         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1826
1827         if (dm_state && dm_state->context)
1828                 dc_release_state(dm_state->context);
1829
1830         kfree(dm_state);
1831 }
1832
1833 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1834         .atomic_duplicate_state = dm_atomic_duplicate_state,
1835         .atomic_destroy_state = dm_atomic_destroy_state,
1836 };
1837
/*
 * Configure DRM mode-config limits and hooks for the device and create
 * the DM private atomic object carrying the global DC state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * amdgpu_display_modeset_create_props().
 */
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	/* Ownership of 'state' transfers to the private obj here; it is
	 * released via dm_atomic_destroy_state() when the obj is fini'd. */
	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	/* NOTE(review): on failure the private obj is not fini'd here —
	 * presumably left to the caller's teardown path; verify against
	 * amdgpu_dm_destroy_drm_device(). */
	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}
1881
/* Fallback backlight input-signal range, used when ACPI caps are
 * unavailable or invalid (see amdgpu_dm_update_backlight_caps()). */
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1884
1885 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1886         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1887
1888 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1889 {
1890 #if defined(CONFIG_ACPI)
1891         struct amdgpu_dm_backlight_caps caps;
1892
1893         if (dm->backlight_caps.caps_valid)
1894                 return;
1895
1896         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1897         if (caps.caps_valid) {
1898                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
1899                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
1900                 dm->backlight_caps.caps_valid = true;
1901         } else {
1902                 dm->backlight_caps.min_input_signal =
1903                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1904                 dm->backlight_caps.max_input_signal =
1905                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1906         }
1907 #else
1908         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1909         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1910 #endif
1911 }
1912
1913 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1914 {
1915         struct amdgpu_display_manager *dm = bl_get_data(bd);
1916         struct amdgpu_dm_backlight_caps caps;
1917         uint32_t brightness = bd->props.brightness;
1918
1919         amdgpu_dm_update_backlight_caps(dm);
1920         caps = dm->backlight_caps;
1921         /*
1922          * The brightness input is in the range 0-255
1923          * It needs to be rescaled to be between the
1924          * requested min and max input signal
1925          *
1926          * It also needs to be scaled up by 0x101 to
1927          * match the DC interface which has a range of
1928          * 0 to 0xffff
1929          */
1930         brightness =
1931                 brightness
1932                 * 0x101
1933                 * (caps.max_input_signal - caps.min_input_signal)
1934                 / AMDGPU_MAX_BL_LEVEL
1935                 + caps.min_input_signal * 0x101;
1936
1937         if (dc_link_set_backlight_level(dm->backlight_link,
1938                         brightness, 0))
1939                 return 0;
1940         else
1941                 return 1;
1942 }
1943
1944 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1945 {
1946         struct amdgpu_display_manager *dm = bl_get_data(bd);
1947         int ret = dc_link_get_backlight_level(dm->backlight_link);
1948
1949         if (ret == DC_ERROR_UNEXPECTED)
1950                 return bd->props.brightness;
1951         return ret;
1952 }
1953
/* Hooks the backlight class device uses to read/apply brightness. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status  = amdgpu_dm_backlight_update_status,
};
1958
1959 static void
1960 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1961 {
1962         char bl_name[16];
1963         struct backlight_properties props = { 0 };
1964
1965         amdgpu_dm_update_backlight_caps(dm);
1966
1967         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1968         props.brightness = AMDGPU_MAX_BL_LEVEL;
1969         props.type = BACKLIGHT_RAW;
1970
1971         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1972                         dm->adev->ddev->primary->index);
1973
1974         dm->backlight_dev = backlight_device_register(bl_name,
1975                         dm->adev->ddev->dev,
1976                         dm,
1977                         &amdgpu_dm_backlight_ops,
1978                         &props);
1979
1980         if (IS_ERR(dm->backlight_dev))
1981                 DRM_ERROR("DM: Backlight registration failed!\n");
1982         else
1983                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1984 }
1985
1986 #endif
1987
1988 static int initialize_plane(struct amdgpu_display_manager *dm,
1989                             struct amdgpu_mode_info *mode_info, int plane_id,
1990                             enum drm_plane_type plane_type,
1991                             const struct dc_plane_cap *plane_cap)
1992 {
1993         struct drm_plane *plane;
1994         unsigned long possible_crtcs;
1995         int ret = 0;
1996
1997         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1998         if (!plane) {
1999                 DRM_ERROR("KMS: Failed to allocate plane\n");
2000                 return -ENOMEM;
2001         }
2002         plane->type = plane_type;
2003
2004         /*
2005          * HACK: IGT tests expect that the primary plane for a CRTC
2006          * can only have one possible CRTC. Only expose support for
2007          * any CRTC if they're not going to be used as a primary plane
2008          * for a CRTC - like overlay or underlay planes.
2009          */
2010         possible_crtcs = 1 << plane_id;
2011         if (plane_id >= dm->dc->caps.max_streams)
2012                 possible_crtcs = 0xff;
2013
2014         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2015
2016         if (ret) {
2017                 DRM_ERROR("KMS: Failed to initialize plane\n");
2018                 kfree(plane);
2019                 return ret;
2020         }
2021
2022         if (mode_info)
2023                 mode_info->planes[plane_id] = plane;
2024
2025         return ret;
2026 }
2027
2028
/*
 * If @link drives an embedded panel (eDP/LVDS) with something actually
 * connected, register the backlight device and remember which link it
 * controls.  Compiled to a no-op without backlight class support.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		/* Only bind the link if registration actually produced a
		 * device. */
		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
2049
2050
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Overlay must be a universal DCN plane that can blend both
		 * ways and supports ARGB8888; skip anything else. */
		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, each bound to its primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* A "forced" connector with nothing physically attached gets
		 * an emulated link; otherwise do a real boot-time detect and,
		 * if connected, hook up the backlight for panel links. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	return 0;
fail:
	/* NOTE(review): only the most recently allocated connector/encoder
	 * pair is freed here; earlier ones registered with DRM are
	 * presumably released by mode-config teardown — verify. */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
2226
2227 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2228 {
2229         drm_mode_config_cleanup(dm->ddev);
2230         drm_atomic_private_obj_fini(&dm->atomic_obj);
2231         return;
2232 }
2233
2234 /******************************************************************************
2235  * amdgpu_display_funcs functions
2236  *****************************************************************************/
2237
/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 * Currently a stub: the base driver calls it unconditionally (see
 * dm_display_funcs), but no DM-side work is implemented yet.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
2249
/* Display function table handed to the amdgpu base driver.  Entries left
 * NULL are either never called for DC or handled inside DC/DAL itself
 * (see the per-entry notes). */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
2263
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * Debug-only sysfs store: write non-zero to force a dm_resume() (plus a
 * hotplug event), zero to force a dm_suspend().  Used to exercise the
 * S3 suspend/resume paths without an actual system sleep.
 *
 * Returns @count on success or a negative errno on bad input.  The old
 * code returned 0 on a parse error, which a sysfs write() reports as
 * "0 bytes consumed" and typically makes userspace retry forever;
 * propagating the kstrtoint() error code avoids that.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);
	if (ret < 0)
		return ret;

	if (s3_state) {
		dm_resume(adev);
		drm_kms_helper_hotplug_event(adev->ddev);
	} else {
		dm_suspend(adev);
	}

	return count;
}

DEVICE_ATTR_WO(s3_debug);

#endif
2293
/*
 * IP-block early_init hook: record the per-ASIC display resource counts
 * (CRTCs, HPD pins, digital encoders) in mode_info, install the DM IRQ
 * funcs, and set the default display funcs table.
 *
 * Returns 0 on success or -EINVAL for an ASIC DC does not support.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	/* Don't clobber funcs if something else already installed a table. */
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
2381
2382 static bool modeset_required(struct drm_crtc_state *crtc_state,
2383                              struct dc_stream_state *new_stream,
2384                              struct dc_stream_state *old_stream)
2385 {
2386         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2387                 return false;
2388
2389         if (!crtc_state->enable)
2390                 return false;
2391
2392         return crtc_state->active;
2393 }
2394
2395 static bool modereset_required(struct drm_crtc_state *crtc_state)
2396 {
2397         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2398                 return false;
2399
2400         return !crtc_state->enable || !crtc_state->active;
2401 }
2402
/* drm_encoder .destroy: release DRM bookkeeping, then free the encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2408
/* Encoder vtable: only .destroy needs a driver-specific implementation. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
2412
2413
2414 static int fill_dc_scaling_info(const struct drm_plane_state *state,
2415                                 struct dc_scaling_info *scaling_info)
2416 {
2417         int scale_w, scale_h;
2418
2419         memset(scaling_info, 0, sizeof(*scaling_info));
2420
2421         /* Source is fixed 16.16 but we ignore mantissa for now... */
2422         scaling_info->src_rect.x = state->src_x >> 16;
2423         scaling_info->src_rect.y = state->src_y >> 16;
2424
2425         scaling_info->src_rect.width = state->src_w >> 16;
2426         if (scaling_info->src_rect.width == 0)
2427                 return -EINVAL;
2428
2429         scaling_info->src_rect.height = state->src_h >> 16;
2430         if (scaling_info->src_rect.height == 0)
2431                 return -EINVAL;
2432
2433         scaling_info->dst_rect.x = state->crtc_x;
2434         scaling_info->dst_rect.y = state->crtc_y;
2435
2436         if (state->crtc_w == 0)
2437                 return -EINVAL;
2438
2439         scaling_info->dst_rect.width = state->crtc_w;
2440
2441         if (state->crtc_h == 0)
2442                 return -EINVAL;
2443
2444         scaling_info->dst_rect.height = state->crtc_h;
2445
2446         /* DRM doesn't specify clipping on destination output. */
2447         scaling_info->clip_rect = scaling_info->dst_rect;
2448
2449         /* TODO: Validate scaling per-format with DC plane caps */
2450         scale_w = scaling_info->dst_rect.width * 1000 /
2451                   scaling_info->src_rect.width;
2452
2453         if (scale_w < 250 || scale_w > 16000)
2454                 return -EINVAL;
2455
2456         scale_h = scaling_info->dst_rect.height * 1000 /
2457                   scaling_info->src_rect.height;
2458
2459         if (scale_h < 250 || scale_h > 16000)
2460                 return -EINVAL;
2461
2462         /*
2463          * The "scaling_quality" can be ignored for now, quality = 0 has DC
2464          * assume reasonable defaults based on the format.
2465          */
2466
2467         return 0;
2468 }
2469
2470 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2471                        uint64_t *tiling_flags)
2472 {
2473         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2474         int r = amdgpu_bo_reserve(rbo, false);
2475
2476         if (unlikely(r)) {
2477                 /* Don't show error message when returning -ERESTARTSYS */
2478                 if (r != -ERESTARTSYS)
2479                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
2480                 return r;
2481         }
2482
2483         if (tiling_flags)
2484                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2485
2486         amdgpu_bo_unreserve(rbo);
2487
2488         return r;
2489 }
2490
2491 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2492 {
2493         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2494
2495         return offset ? (address + offset * 256) : 0;
2496 }
2497
/*
 * Validate and fill DCC (Delta Color Compression) parameters for a
 * graphics plane, including the address of the DCC metadata surface.
 *
 * Returns 0 when DCC is simply not in use (no offset in the tiling
 * flags, or a video format) or when it was set up successfully;
 * -EINVAL when DC rejects the requested DCC configuration.
 */
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
                          const struct amdgpu_framebuffer *afb,
                          const enum surface_pixel_format format,
                          const enum dc_rotation_angle rotation,
                          const union plane_size *plane_size,
                          const union dc_tiling_info *tiling_info,
                          const uint64_t info,
                          struct dc_plane_dcc_param *dcc,
                          struct dc_plane_address *address)
{
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
        struct dc_surface_dcc_cap output;
        /* DCC metadata offset and independent-64B-blocks bit from the BO. */
        uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
        uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
        uint64_t dcc_address;

        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));

        /* No DCC offset means the buffer was allocated without DCC. */
        if (!offset)
                return 0;

        /* Only graphics (non-video) formats are handled here. */
        if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
                return 0;

        if (!dc->cap_funcs.get_dcc_compression_cap)
                return -EINVAL;

        /* Describe the surface so DC can judge compressibility. */
        input.format = format;
        input.surface_size.width = plane_size->grph.surface_size.width;
        input.surface_size.height = plane_size->grph.surface_size.height;
        input.swizzle_mode = tiling_info->gfx9.swizzle;

        if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
                input.scan = SCAN_DIRECTION_HORIZONTAL;
        else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
                input.scan = SCAN_DIRECTION_VERTICAL;

        if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
                return -EINVAL;

        if (!output.capable)
                return -EINVAL;

        /*
         * DC needs independent 64B blocks for this configuration but the
         * BO's tiling flags don't provide them - reject.
         */
        if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
                return -EINVAL;

        dcc->enable = 1;
        dcc->grph.meta_pitch =
                AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
        dcc->grph.independent_64b_blks = i64b;

        /* Point the plane at its DCC metadata surface. */
        dcc_address = get_dcc_address(afb->address, info);
        address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
        address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

        return 0;
}
2558
/*
 * Fill buffer-layout attributes for a plane from its framebuffer and BO
 * tiling flags: plane size(s), tiling parameters, DCC state and the
 * scanout address.
 *
 * Graphics formats use a single surface; video formats get separate
 * luma/chroma sizes and addresses (chroma at fb->offsets[1]).  GFX8
 * macro-tile parameters are decoded from the tiling flags; on
 * Vega10/12/20 and Raven the GFX9 swizzle-based parameters are filled
 * instead, including DCC validation.
 *
 * Returns 0 on success or a negative errno from DCC validation.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             const struct amdgpu_framebuffer *afb,
                             const enum surface_pixel_format format,
                             const enum dc_rotation_angle rotation,
                             const uint64_t tiling_flags,
                             union dc_tiling_info *tiling_info,
                             union plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address)
{
        const struct drm_framebuffer *fb = &afb->base;
        int ret;

        memset(tiling_info, 0, sizeof(*tiling_info));
        memset(plane_size, 0, sizeof(*plane_size));
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));

        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                /* Single-plane graphics surface. */
                plane_size->grph.surface_size.x = 0;
                plane_size->grph.surface_size.y = 0;
                plane_size->grph.surface_size.width = fb->width;
                plane_size->grph.surface_size.height = fb->height;
                /* Pitch in pixels, converted from the byte pitch. */
                plane_size->grph.surface_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                address->type = PLN_ADDR_TYPE_GRAPHICS;
                address->grph.addr.low_part = lower_32_bits(afb->address);
                address->grph.addr.high_part = upper_32_bits(afb->address);
        } else {
                /* Semi-planar video: chroma plane lives at offsets[1]. */
                uint64_t chroma_addr = afb->address + fb->offsets[1];

                plane_size->video.luma_size.x = 0;
                plane_size->video.luma_size.y = 0;
                plane_size->video.luma_size.width = fb->width;
                plane_size->video.luma_size.height = fb->height;
                plane_size->video.luma_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                plane_size->video.chroma_size.x = 0;
                plane_size->video.chroma_size.y = 0;
                /* TODO: set these based on surface format */
                plane_size->video.chroma_size.width = fb->width / 2;
                plane_size->video.chroma_size.height = fb->height / 2;

                plane_size->video.chroma_pitch =
                        fb->pitches[1] / fb->format->cpp[1];

                address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                address->video_progressive.luma_addr.low_part =
                        lower_32_bits(afb->address);
                address->video_progressive.luma_addr.high_part =
                        upper_32_bits(afb->address);
                address->video_progressive.chroma_addr.low_part =
                        lower_32_bits(chroma_addr);
                address->video_progressive.chroma_addr.high_part =
                        upper_32_bits(chroma_addr);
        }

        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

                /* 2D macro tiling: decode bank geometry from the flags. */
                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                tiling_info->gfx8.num_banks = num_banks;
                tiling_info->gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                tiling_info->gfx8.tile_split = tile_split;
                tiling_info->gfx8.bank_width = bankw;
                tiling_info->gfx8.bank_height = bankh;
                tiling_info->gfx8.tile_aspect = mtaspect;
                tiling_info->gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        tiling_info->gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
            adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                tiling_info->gfx9.num_pipes =
                        adev->gfx.config.gb_addr_config_fields.num_pipes;
                tiling_info->gfx9.num_banks =
                        adev->gfx.config.gb_addr_config_fields.num_banks;
                tiling_info->gfx9.pipe_interleave =
                        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
                tiling_info->gfx9.num_shader_engines =
                        adev->gfx.config.gb_addr_config_fields.num_se;
                tiling_info->gfx9.max_compressed_frags =
                        adev->gfx.config.gb_addr_config_fields.max_compress_frags;
                tiling_info->gfx9.num_rb_per_se =
                        adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
                tiling_info->gfx9.swizzle =
                        AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
                tiling_info->gfx9.shaderEnable = 1;

                /* GFX9 surfaces may carry DCC; validate and fill it. */
                ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
                                                plane_size, tiling_info,
                                                tiling_flags, dcc, address);
                if (ret)
                        return ret;
        }

        return 0;
}
2677
2678 static void
2679 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
2680                                bool *per_pixel_alpha, bool *global_alpha,
2681                                int *global_alpha_value)
2682 {
2683         *per_pixel_alpha = false;
2684         *global_alpha = false;
2685         *global_alpha_value = 0xff;
2686
2687         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2688                 return;
2689
2690         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2691                 static const uint32_t alpha_formats[] = {
2692                         DRM_FORMAT_ARGB8888,
2693                         DRM_FORMAT_RGBA8888,
2694                         DRM_FORMAT_ABGR8888,
2695                 };
2696                 uint32_t format = plane_state->fb->format->format;
2697                 unsigned int i;
2698
2699                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2700                         if (format == alpha_formats[i]) {
2701                                 *per_pixel_alpha = true;
2702                                 break;
2703                         }
2704                 }
2705         }
2706
2707         if (plane_state->alpha < 0xffff) {
2708                 *global_alpha = true;
2709                 *global_alpha_value = plane_state->alpha >> 8;
2710         }
2711 }
2712
2713 static int
2714 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
2715                             const enum surface_pixel_format format,
2716                             enum dc_color_space *color_space)
2717 {
2718         bool full_range;
2719
2720         *color_space = COLOR_SPACE_SRGB;
2721
2722         /* DRM color properties only affect non-RGB formats. */
2723         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2724                 return 0;
2725
2726         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
2727
2728         switch (plane_state->color_encoding) {
2729         case DRM_COLOR_YCBCR_BT601:
2730                 if (full_range)
2731                         *color_space = COLOR_SPACE_YCBCR601;
2732                 else
2733                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
2734                 break;
2735
2736         case DRM_COLOR_YCBCR_BT709:
2737                 if (full_range)
2738                         *color_space = COLOR_SPACE_YCBCR709;
2739                 else
2740                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
2741                 break;
2742
2743         case DRM_COLOR_YCBCR_BT2020:
2744                 if (full_range)
2745                         *color_space = COLOR_SPACE_2020_YCBCR;
2746                 else
2747                         return -EINVAL;
2748                 break;
2749
2750         default:
2751                 return -EINVAL;
2752         }
2753
2754         return 0;
2755 }
2756
/*
 * Build a dc_plane_info (format, rotation, tiling, blending, color
 * space) and the plane's scanout address from DRM plane state plus the
 * backing BO's tiling flags.
 *
 * Returns 0 on success, or a negative errno when the pixel format,
 * color properties or buffer attributes cannot be translated.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address)
{
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
                to_amdgpu_framebuffer(plane_state->fb);
        struct drm_format_name_buf format_name;
        int ret;

        memset(plane_info, 0, sizeof(*plane_info));

        /* Translate the DRM fourcc into DC's surface pixel format. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                plane_info->format =
                        SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
                break;
        case DRM_FORMAT_RGB565:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
                break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
                break;
        case DRM_FORMAT_NV21:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
                break;
        case DRM_FORMAT_NV12:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
                break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %s\n",
                        drm_get_format_name(fb->format->format, &format_name));
                return -EINVAL;
        }

        /* Only the rotation bits matter here; reflections are ignored. */
        switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
        case DRM_MODE_ROTATE_0:
                plane_info->rotation = ROTATION_ANGLE_0;
                break;
        case DRM_MODE_ROTATE_90:
                plane_info->rotation = ROTATION_ANGLE_90;
                break;
        case DRM_MODE_ROTATE_180:
                plane_info->rotation = ROTATION_ANGLE_180;
                break;
        case DRM_MODE_ROTATE_270:
                plane_info->rotation = ROTATION_ANGLE_270;
                break;
        default:
                plane_info->rotation = ROTATION_ANGLE_0;
                break;
        }

        plane_info->visible = true;
        plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

        ret = fill_plane_color_attributes(plane_state, plane_info->format,
                                          &plane_info->color_space);
        if (ret)
                return ret;

        /* Tiling, plane size, DCC and the scanout address. */
        ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
                                           &plane_info->dcc, address);
        if (ret)
                return ret;

        fill_blending_from_plane_state(
                plane_state, &plane_info->per_pixel_alpha,
                &plane_info->global_alpha, &plane_info->global_alpha_value);

        return 0;
}
2849
2850 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
2851                                     struct dc_plane_state *dc_plane_state,
2852                                     struct drm_plane_state *plane_state,
2853                                     struct drm_crtc_state *crtc_state)
2854 {
2855         const struct amdgpu_framebuffer *amdgpu_fb =
2856                 to_amdgpu_framebuffer(plane_state->fb);
2857         struct dc_scaling_info scaling_info;
2858         struct dc_plane_info plane_info;
2859         uint64_t tiling_flags;
2860         int ret;
2861
2862         ret = fill_dc_scaling_info(plane_state, &scaling_info);
2863         if (ret)
2864                 return ret;
2865
2866         dc_plane_state->src_rect = scaling_info.src_rect;
2867         dc_plane_state->dst_rect = scaling_info.dst_rect;
2868         dc_plane_state->clip_rect = scaling_info.clip_rect;
2869         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
2870
2871         ret = get_fb_info(amdgpu_fb, &tiling_flags);
2872         if (ret)
2873                 return ret;
2874
2875         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
2876                                           &plane_info,
2877                                           &dc_plane_state->address);
2878         if (ret)
2879                 return ret;
2880
2881         dc_plane_state->format = plane_info.format;
2882         dc_plane_state->color_space = plane_info.color_space;
2883         dc_plane_state->format = plane_info.format;
2884         dc_plane_state->plane_size = plane_info.plane_size;
2885         dc_plane_state->rotation = plane_info.rotation;
2886         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
2887         dc_plane_state->stereo_format = plane_info.stereo_format;
2888         dc_plane_state->tiling_info = plane_info.tiling_info;
2889         dc_plane_state->visible = plane_info.visible;
2890         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
2891         dc_plane_state->global_alpha = plane_info.global_alpha;
2892         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
2893         dc_plane_state->dcc = plane_info.dcc;
2894
2895         /*
2896          * Always set input transfer function, since plane state is refreshed
2897          * every time.
2898          */
2899         ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2900         if (ret) {
2901                 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2902                 dc_plane_state->in_transfer_func = NULL;
2903         }
2904
2905         return ret;
2906 }
2907
/*
 * Compute the stream's src (viewport in composition space) and dst
 * (stream addressable area) rectangles from the display mode and the
 * connector's scaling/underscan state.
 *
 * Default is a full-screen stretch; RMX_ASPECT/RMX_OFF preserve the
 * mode's aspect ratio, RMX_CENTER shows the mode 1:1.  The result is
 * centered and then shrunk by any underscan borders.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
                                           const struct dm_connector_state *dm_state,
                                           struct dc_stream_state *stream)
{
        enum amdgpu_rmx_type rmx_type;

        struct rect src = { 0 }; /* viewport in composition space*/
        struct rect dst = { 0 }; /* stream addressable area */

        /* no mode. nothing to be done */
        if (!mode)
                return;

        /* Full screen scaling by default */
        src.width = mode->hdisplay;
        src.height = mode->vdisplay;
        dst.width = stream->timing.h_addressable;
        dst.height = stream->timing.v_addressable;

        if (dm_state) {
                rmx_type = dm_state->scaling;
                if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
                        /* Keep aspect: shrink dst on the less-constrained axis. */
                        if (src.width * dst.height <
                                        src.height * dst.width) {
                                /* height needs less upscaling/more downscaling */
                                dst.width = src.width *
                                                dst.height / src.height;
                        } else {
                                /* width needs less upscaling/more downscaling */
                                dst.height = src.height *
                                                dst.width / src.width;
                        }
                } else if (rmx_type == RMX_CENTER) {
                        /* Show the source unscaled, centered below. */
                        dst = src;
                }

                /* Center the (possibly shrunk) dst in the addressable area. */
                dst.x = (stream->timing.h_addressable - dst.width) / 2;
                dst.y = (stream->timing.v_addressable - dst.height) / 2;

                if (dm_state->underscan_enable) {
                        dst.x += dm_state->underscan_hborder / 2;
                        dst.y += dm_state->underscan_vborder / 2;
                        dst.width -= dm_state->underscan_hborder;
                        dst.height -= dm_state->underscan_vborder;
                }
        }

        stream->src = src;
        stream->dst = dst;

        DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
                        dst.x, dst.y, dst.width, dst.height);

}
2962
2963 static enum dc_color_depth
2964 convert_color_depth_from_display_info(const struct drm_connector *connector)
2965 {
2966         struct dm_connector_state *dm_conn_state =
2967                 to_dm_connector_state(connector->state);
2968         uint32_t bpc = connector->display_info.bpc;
2969
2970         /* TODO: Remove this when there's support for max_bpc in drm */
2971         if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2972                 /* Round down to nearest even number. */
2973                 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2974
2975         switch (bpc) {
2976         case 0:
2977                 /*
2978                  * Temporary Work around, DRM doesn't parse color depth for
2979                  * EDID revision before 1.4
2980                  * TODO: Fix edid parsing
2981                  */
2982                 return COLOR_DEPTH_888;
2983         case 6:
2984                 return COLOR_DEPTH_666;
2985         case 8:
2986                 return COLOR_DEPTH_888;
2987         case 10:
2988                 return COLOR_DEPTH_101010;
2989         case 12:
2990                 return COLOR_DEPTH_121212;
2991         case 14:
2992                 return COLOR_DEPTH_141414;
2993         case 16:
2994                 return COLOR_DEPTH_161616;
2995         default:
2996                 return COLOR_DEPTH_UNDEFINED;
2997         }
2998 }
2999
/*
 * Convert the DRM picture aspect ratio to DC's enum with a direct cast;
 * per the comment below, both enums follow the HDMI spec ordering.
 */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
        /* 1-1 mapping, since both enums follow the HDMI spec. */
        return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
3006
3007 static enum dc_color_space
3008 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3009 {
3010         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3011
3012         switch (dc_crtc_timing->pixel_encoding) {
3013         case PIXEL_ENCODING_YCBCR422:
3014         case PIXEL_ENCODING_YCBCR444:
3015         case PIXEL_ENCODING_YCBCR420:
3016         {
3017                 /*
3018                  * 27030khz is the separation point between HDTV and SDTV
3019                  * according to HDMI spec, we use YCbCr709 and YCbCr601
3020                  * respectively
3021                  */
3022                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3023                         if (dc_crtc_timing->flags.Y_ONLY)
3024                                 color_space =
3025                                         COLOR_SPACE_YCBCR709_LIMITED;
3026                         else
3027                                 color_space = COLOR_SPACE_YCBCR709;
3028                 } else {
3029                         if (dc_crtc_timing->flags.Y_ONLY)
3030                                 color_space =
3031                                         COLOR_SPACE_YCBCR601_LIMITED;
3032                         else
3033                                 color_space = COLOR_SPACE_YCBCR601;
3034                 }
3035
3036         }
3037         break;
3038         case PIXEL_ENCODING_RGB:
3039                 color_space = COLOR_SPACE_SRGB;
3040                 break;
3041
3042         default:
3043                 WARN_ON(1);
3044                 break;
3045         }
3046
3047         return color_space;
3048 }
3049
3050 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3051 {
3052         if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3053                 return;
3054
3055         timing_out->display_color_depth--;
3056 }
3057
/*
 * Iteratively reduce the stream's color depth until the effective
 * (depth-adjusted) pixel clock fits within the sink's max TMDS clock,
 * following the HDMI clock multipliers for deep color.  Stops at 8 bpc,
 * which is never reduced further.
 */
static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
                                                const struct drm_display_info *info)
{
        /* Clock in kHz (pix_clk_100hz / 10), scaled for the color depth. */
        int normalized_clk;
        /* 8 bpc and below need no adjustment. */
        if (timing_out->display_color_depth <= COLOR_DEPTH_888)
                return;
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                        normalized_clk /= 2;
                /* Adjusting pix clock following on HDMI spec based on colour depth */
                switch (timing_out->display_color_depth) {
                case COLOR_DEPTH_101010:
                        normalized_clk = (normalized_clk * 30) / 24;
                        break;
                case COLOR_DEPTH_121212:
                        normalized_clk = (normalized_clk * 36) / 24;
                        break;
                case COLOR_DEPTH_161616:
                        normalized_clk = (normalized_clk * 48) / 24;
                        break;
                default:
                        /* Unhandled depths (e.g. 14 bpc) are left as-is. */
                        return;
                }
                /* NOTE(review): assumes max_tmds_clock is in kHz - confirm. */
                if (normalized_clk <= info->max_tmds_clock)
                        return;
                reduce_mode_colour_depth(timing_out);

        } while (timing_out->display_color_depth > COLOR_DEPTH_888);

}
3090
/*
 * Populate the stream's CRTC timing (and output color space / transfer
 * function) from a DRM display mode.
 *
 * Pixel encoding is chosen from the sink's capabilities for HDMI
 * (4:2:0-only modes, then 4:4:4 if advertised, else RGB).  When
 * @old_stream is given, the VIC and sync polarities are carried over
 * from it instead of being re-derived from the mode.
 */
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
                                             const struct drm_display_mode *mode_in,
                                             const struct drm_connector *connector,
                                             const struct dc_stream_state *old_stream)
{
        struct dc_crtc_timing *timing_out = &stream->timing;
        const struct drm_display_info *info = &connector->display_info;

        memset(timing_out, 0, sizeof(struct dc_crtc_timing));

        timing_out->h_border_left = 0;
        timing_out->h_border_right = 0;
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */
        if (drm_mode_is_420_only(info, mode_in)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
        else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
                timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
                        connector);
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;

        /* Reuse VIC and sync polarity from the old stream when provided. */
        if(old_stream) {
                timing_out->vic = old_stream->timing.vic;
                timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
                timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
        } else {
                timing_out->vic = drm_match_cea_mode(mode_in);
                if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
                        timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
                if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
                        timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
        }

        /* Horizontal/vertical timing from the mode's crtc_* fields. */
        timing_out->h_addressable = mode_in->crtc_hdisplay;
        timing_out->h_total = mode_in->crtc_htotal;
        timing_out->h_sync_width =
                mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
        timing_out->h_front_porch =
                mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
        timing_out->v_total = mode_in->crtc_vtotal;
        timing_out->v_addressable = mode_in->crtc_vdisplay;
        timing_out->v_front_porch =
                mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
        timing_out->v_sync_width =
                mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
        /* crtc_clock is in kHz; DC wants units of 100 Hz. */
        timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);

        stream->output_color_space = get_output_color_space(timing_out);

        /* Output transfer function defaults to predefined sRGB. */
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                adjust_colour_depth_from_display_info(timing_out, info);
}
3156
3157 static void fill_audio_info(struct audio_info *audio_info,
3158                             const struct drm_connector *drm_connector,
3159                             const struct dc_sink *dc_sink)
3160 {
3161         int i = 0;
3162         int cea_revision = 0;
3163         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3164
3165         audio_info->manufacture_id = edid_caps->manufacturer_id;
3166         audio_info->product_id = edid_caps->product_id;
3167
3168         cea_revision = drm_connector->display_info.cea_rev;
3169
3170         strscpy(audio_info->display_name,
3171                 edid_caps->display_name,
3172                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3173
3174         if (cea_revision >= 3) {
3175                 audio_info->mode_count = edid_caps->audio_mode_count;
3176
3177                 for (i = 0; i < audio_info->mode_count; ++i) {
3178                         audio_info->modes[i].format_code =
3179                                         (enum audio_format_code)
3180                                         (edid_caps->audio_modes[i].format_code);
3181                         audio_info->modes[i].channel_count =
3182                                         edid_caps->audio_modes[i].channel_count;
3183                         audio_info->modes[i].sample_rates.all =
3184                                         edid_caps->audio_modes[i].sample_rate;
3185                         audio_info->modes[i].sample_size =
3186                                         edid_caps->audio_modes[i].sample_size;
3187                 }
3188         }
3189
3190         audio_info->flags.all = edid_caps->speaker_flags;
3191
3192         /* TODO: We only check for the progressive mode, check for interlace mode too */
3193         if (drm_connector->latency_present[0]) {
3194                 audio_info->video_latency = drm_connector->video_latency[0];
3195                 audio_info->audio_latency = drm_connector->audio_latency[0];
3196         }
3197
3198         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3199
3200 }
3201
3202 static void
3203 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3204                                       struct drm_display_mode *dst_mode)
3205 {
3206         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3207         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3208         dst_mode->crtc_clock = src_mode->crtc_clock;
3209         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3210         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3211         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
3212         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3213         dst_mode->crtc_htotal = src_mode->crtc_htotal;
3214         dst_mode->crtc_hskew = src_mode->crtc_hskew;
3215         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3216         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3217         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3218         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3219         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3220 }
3221
3222 static void
3223 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3224                                         const struct drm_display_mode *native_mode,
3225                                         bool scale_enabled)
3226 {
3227         if (scale_enabled) {
3228                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3229         } else if (native_mode->clock == drm_mode->clock &&
3230                         native_mode->htotal == drm_mode->htotal &&
3231                         native_mode->vtotal == drm_mode->vtotal) {
3232                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3233         } else {
3234                 /* no scaling nor amdgpu inserted, no need to patch */
3235         }
3236 }
3237
3238 static struct dc_sink *
3239 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3240 {
3241         struct dc_sink_init_data sink_init_data = { 0 };
3242         struct dc_sink *sink = NULL;
3243         sink_init_data.link = aconnector->dc_link;
3244         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3245
3246         sink = dc_sink_create(&sink_init_data);
3247         if (!sink) {
3248                 DRM_ERROR("Failed to create sink!\n");
3249                 return NULL;
3250         }
3251         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3252
3253         return sink;
3254 }
3255
3256 static void set_multisync_trigger_params(
3257                 struct dc_stream_state *stream)
3258 {
3259         if (stream->triggered_crtc_reset.enabled) {
3260                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3261                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3262         }
3263 }
3264
/*
 * Pick the enabled stream with the highest refresh rate as the multisync
 * master, then point every stream's triggered_crtc_reset event source at it.
 */
static void set_master_stream(struct dc_stream_state *stream_set[],
                              int stream_count)
{
        int j, highest_rfr = 0, master_stream = 0;

        for (j = 0;  j < stream_count; j++) {
                if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
                        int refresh_rate = 0;

                        /* pix_clk_100hz * 100 = Hz; divide by pixels per frame
                         * to get the integer refresh rate. */
                        refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
                                (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
                        if (refresh_rate > highest_rfr) {
                                highest_rfr = refresh_rate;
                                master_stream = j;
                        }
                }
        }
        /* NOTE(review): if no stream has triggered_crtc_reset enabled,
         * master_stream stays 0 and stream 0 becomes everyone's event
         * source by default — confirm this is intended. */
        for (j = 0;  j < stream_count; j++) {
                if (stream_set[j])
                        stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
        }
}
3287
3288 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3289 {
3290         int i = 0;
3291
3292         if (context->stream_count < 2)
3293                 return;
3294         for (i = 0; i < context->stream_count ; i++) {
3295                 if (!context->streams[i])
3296                         continue;
3297                 /*
3298                  * TODO: add a function to read AMD VSDB bits and set
3299                  * crtc_sync_master.multi_sync_enabled flag
3300                  * For now it's set to false
3301                  */
3302                 set_multisync_trigger_params(context->streams[i]);
3303         }
3304         set_master_stream(context->streams, context->stream_count);
3305 }
3306
/*
 * Build a dc_stream_state for the given connector and display mode.
 *
 * @aconnector: connector the stream is created for (must not be NULL)
 * @drm_mode:   requested display mode (copied; not modified)
 * @dm_state:   optional connector state; supplies the scaling setting.
 *              May be NULL (e.g. during mode validation).
 * @old_stream: optional previous stream; its vic/sync polarities are
 *              reused when scaling is active and the refresh rate is
 *              unchanged. May be NULL.
 *
 * Returns a new stream (caller releases with dc_stream_release()), or
 * NULL on failure. The sink reference taken here is always dropped
 * before returning.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                       const struct dm_connector_state *dm_state,
                       const struct dc_stream_state *old_stream)
{
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;
        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;

        struct dc_sink *sink = NULL;
        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
                return stream;
        }

        drm_connector = &aconnector->base;

        /* With no physical sink attached, validate against a virtual one. */
        if (!aconnector->dc_sink) {
                sink = create_fake_sink(aconnector);
                if (!sink)
                        return stream;
        } else {
                sink = aconnector->dc_sink;
                dc_sink_retain(sink);
        }

        stream = dc_create_stream_for_sink(sink);

        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto finish;
        }

        stream->dm_stream_context = aconnector;

        /* Search for preferred mode; fall back to the first listed mode. */
        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                        native_mode_found = true;
                        break;
                }
        }
        if (!native_mode_found)
                preferred_mode = list_first_entry_or_null(
                                &aconnector->base.modes,
                                struct drm_display_mode,
                                head);

        mode_refresh = drm_mode_vrefresh(&mode);

        if (preferred_mode == NULL) {
                /*
                 * This may not be an error, the use case is when we have no
                 * usermode calls to reset and set mode upon hotplug. In this
                 * case, we call set mode ourselves to restore the previous mode
                 * and the modelist may not be filled in in time.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
                decide_crtc_timing_for_drm_display_mode(
                                &mode, preferred_mode,
                                dm_state ? (dm_state->scaling != RMX_OFF) : false);
                preferred_refresh = drm_mode_vrefresh(preferred_mode);
        }

        /* No connector state: derive the crtc_* timing fields ourselves. */
        if (!dm_state)
                drm_mode_set_crtcinfo(&mode, 0);

        /*
         * If scaling is enabled and refresh rate didn't change
         * we copy the vic and polarities of the old timings
         */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, NULL);
        else
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, old_stream);

        update_stream_scaling_settings(&mode, dm_state, stream);

        fill_audio_info(
                &stream->audio_info,
                drm_connector,
                sink);

        update_stream_signal(stream, sink);

finish:
        /* Drop the reference taken above (real sink) or by creation (fake). */
        dc_sink_release(sink);

        return stream;
}
3406
/* Tear down the DRM CRTC bookkeeping and free the embedding allocation. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}
3412
3413 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3414                                   struct drm_crtc_state *state)
3415 {
3416         struct dm_crtc_state *cur = to_dm_crtc_state(state);
3417
3418         /* TODO Destroy dc_stream objects are stream object is flattened */
3419         if (cur->stream)
3420                 dc_stream_release(cur->stream);
3421
3422
3423         __drm_atomic_helper_crtc_destroy_state(state);
3424
3425
3426         kfree(state);
3427 }
3428
3429 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3430 {
3431         struct dm_crtc_state *state;
3432
3433         if (crtc->state)
3434                 dm_crtc_destroy_state(crtc, crtc->state);
3435
3436         state = kzalloc(sizeof(*state), GFP_KERNEL);
3437         if (WARN_ON(!state))
3438                 return;
3439
3440         crtc->state = &state->base;
3441         crtc->state->crtc = crtc;
3442
3443 }
3444
3445 static struct drm_crtc_state *
3446 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3447 {
3448         struct dm_crtc_state *state, *cur;
3449
3450         cur = to_dm_crtc_state(crtc->state);
3451
3452         if (WARN_ON(!crtc->state))
3453                 return NULL;
3454
3455         state = kzalloc(sizeof(*state), GFP_KERNEL);
3456         if (!state)
3457                 return NULL;
3458
3459         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3460
3461         if (cur->stream) {
3462                 state->stream = cur->stream;
3463                 dc_stream_retain(state->stream);
3464         }
3465
3466         state->active_planes = cur->active_planes;
3467         state->interrupts_enabled = cur->interrupts_enabled;
3468         state->vrr_params = cur->vrr_params;
3469         state->vrr_infopacket = cur->vrr_infopacket;
3470         state->abm_level = cur->abm_level;
3471         state->vrr_supported = cur->vrr_supported;
3472         state->freesync_config = cur->freesync_config;
3473         state->crc_enabled = cur->crc_enabled;
3474
3475         /* TODO Duplicate dc_stream after objects are stream object is flattened */
3476
3477         return &state->base;
3478 }
3479
3480 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3481 {
3482         enum dc_irq_source irq_source;
3483         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3484         struct amdgpu_device *adev = crtc->dev->dev_private;
3485         int rc;
3486
3487         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3488
3489         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3490
3491         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3492                          acrtc->crtc_id, enable ? "en" : "dis", rc);
3493         return rc;
3494 }
3495
3496 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3497 {
3498         enum dc_irq_source irq_source;
3499         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3500         struct amdgpu_device *adev = crtc->dev->dev_private;
3501         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3502         int rc = 0;
3503
3504         if (enable) {
3505                 /* vblank irq on -> Only need vupdate irq in vrr mode */
3506                 if (amdgpu_dm_vrr_active(acrtc_state))
3507                         rc = dm_set_vupdate_irq(crtc, true);
3508         } else {
3509                 /* vblank irq off -> vupdate irq off */
3510                 rc = dm_set_vupdate_irq(crtc, false);
3511         }
3512
3513         if (rc)
3514                 return rc;
3515
3516         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3517         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3518 }
3519
/* DRM .enable_vblank hook; thin wrapper over dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
        return dm_set_vblank(crtc, true);
}
3524
/* DRM .disable_vblank hook; any dm_set_vblank() error is ignored here. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
        dm_set_vblank(crtc, false);
}
3529
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
};
3544
3545 static enum drm_connector_status
3546 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3547 {
3548         bool connected;
3549         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3550
3551         /*
3552          * Notes:
3553          * 1. This interface is NOT called in context of HPD irq.
3554          * 2. This interface *is called* in context of user-mode ioctl. Which
3555          * makes it a bad place for *any* MST-related activity.
3556          */
3557
3558         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3559             !aconnector->fake_enable)
3560                 connected = (aconnector->dc_sink != NULL);
3561         else
3562                 connected = (aconnector->base.force == DRM_FORCE_ON);
3563
3564         return (connected ? connector_status_connected :
3565                         connector_status_disconnected);
3566 }
3567
3568 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3569                                             struct drm_connector_state *connector_state,
3570                                             struct drm_property *property,
3571                                             uint64_t val)
3572 {
3573         struct drm_device *dev = connector->dev;
3574         struct amdgpu_device *adev = dev->dev_private;
3575         struct dm_connector_state *dm_old_state =
3576                 to_dm_connector_state(connector->state);
3577         struct dm_connector_state *dm_new_state =
3578                 to_dm_connector_state(connector_state);
3579
3580         int ret = -EINVAL;
3581
3582         if (property == dev->mode_config.scaling_mode_property) {
3583                 enum amdgpu_rmx_type rmx_type;
3584
3585                 switch (val) {
3586                 case DRM_MODE_SCALE_CENTER:
3587                         rmx_type = RMX_CENTER;
3588                         break;
3589                 case DRM_MODE_SCALE_ASPECT:
3590                         rmx_type = RMX_ASPECT;
3591                         break;
3592                 case DRM_MODE_SCALE_FULLSCREEN:
3593                         rmx_type = RMX_FULL;
3594                         break;
3595                 case DRM_MODE_SCALE_NONE:
3596                 default:
3597                         rmx_type = RMX_OFF;
3598                         break;
3599                 }
3600
3601                 if (dm_old_state->scaling == rmx_type)
3602                         return 0;
3603
3604                 dm_new_state->scaling = rmx_type;
3605                 ret = 0;
3606         } else if (property == adev->mode_info.underscan_hborder_property) {
3607                 dm_new_state->underscan_hborder = val;
3608                 ret = 0;
3609         } else if (property == adev->mode_info.underscan_vborder_property) {
3610                 dm_new_state->underscan_vborder = val;
3611                 ret = 0;
3612         } else if (property == adev->mode_info.underscan_property) {
3613                 dm_new_state->underscan_enable = val;
3614                 ret = 0;
3615         } else if (property == adev->mode_info.max_bpc_property) {
3616                 dm_new_state->max_bpc = val;
3617                 ret = 0;
3618         } else if (property == adev->mode_info.abm_level_property) {
3619                 dm_new_state->abm_level = val;
3620                 ret = 0;
3621         }
3622
3623         return ret;
3624 }
3625
3626 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3627                                             const struct drm_connector_state *state,
3628                                             struct drm_property *property,
3629                                             uint64_t *val)
3630 {
3631         struct drm_device *dev = connector->dev;
3632         struct amdgpu_device *adev = dev->dev_private;
3633         struct dm_connector_state *dm_state =
3634                 to_dm_connector_state(state);
3635         int ret = -EINVAL;
3636
3637         if (property == dev->mode_config.scaling_mode_property) {
3638                 switch (dm_state->scaling) {
3639                 case RMX_CENTER:
3640                         *val = DRM_MODE_SCALE_CENTER;
3641                         break;
3642                 case RMX_ASPECT:
3643                         *val = DRM_MODE_SCALE_ASPECT;
3644                         break;
3645                 case RMX_FULL:
3646                         *val = DRM_MODE_SCALE_FULLSCREEN;
3647                         break;
3648                 case RMX_OFF:
3649                 default:
3650                         *val = DRM_MODE_SCALE_NONE;
3651                         break;
3652                 }
3653                 ret = 0;
3654         } else if (property == adev->mode_info.underscan_hborder_property) {
3655                 *val = dm_state->underscan_hborder;
3656                 ret = 0;
3657         } else if (property == adev->mode_info.underscan_vborder_property) {
3658                 *val = dm_state->underscan_vborder;
3659                 ret = 0;
3660         } else if (property == adev->mode_info.underscan_property) {
3661                 *val = dm_state->underscan_enable;
3662                 ret = 0;
3663         } else if (property == adev->mode_info.max_bpc_property) {
3664                 *val = dm_state->max_bpc;
3665                 ret = 0;
3666         } else if (property == adev->mode_info.abm_level_property) {
3667                 *val = dm_state->abm_level;
3668                 ret = 0;
3669         }
3670
3671         return ret;
3672 }
3673
/*
 * DRM .destroy hook for DM connectors: unregister the backlight device
 * for internal panels, drop the emulated and real sink references, and
 * release the CEC adapter before freeing the connector itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        /* Only eDP/LVDS panels own the backlight device registered by DM. */
        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none &&
            dm->backlight_dev) {
                backlight_device_unregister(dm->backlight_dev);
                dm->backlight_dev = NULL;
        }
#endif

        /* Drop sink references; NULL the pointers to avoid stale reuse. */
        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
        aconnector->dc_em_sink = NULL;
        if (aconnector->dc_sink)
                dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;

        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
}
3704
/*
 * DRM .reset hook for DM connectors: free the current connector state
 * and install a fresh one with driver defaults (scaling off, no
 * underscan, 8 bpc cap).
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
        /* NOTE(review): assumes base is the first member of
         * dm_connector_state so that a NULL connector->state maps to a
         * NULL state pointer here (kfree(NULL) is a no-op) — confirm. */
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        if (connector->state)
                __drm_atomic_helper_connector_destroy_state(connector->state);

        kfree(state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (state) {
                state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
                state->max_bpc = 8;

                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
}
3727
3728 struct drm_connector_state *
3729 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3730 {
3731         struct dm_connector_state *state =
3732                 to_dm_connector_state(connector->state);
3733
3734         struct dm_connector_state *new_state =
3735                         kmemdup(state, sizeof(*state), GFP_KERNEL);
3736
3737         if (!new_state)
3738                 return NULL;
3739
3740         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3741
3742         new_state->freesync_capable = state->freesync_capable;
3743         new_state->abm_level = state->abm_level;
3744         new_state->scaling = state->scaling;
3745         new_state->underscan_enable = state->underscan_enable;
3746         new_state->underscan_hborder = state->underscan_hborder;
3747         new_state->underscan_vborder = state->underscan_vborder;
3748         new_state->max_bpc = state->max_bpc;
3749
3750         return &new_state->base;
3751 }
3752
/* Connector function table shared by all DM connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
3763
/* Helper .get_modes hook; thin wrapper over the DM implementation. */
static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}
3768
/*
 * Create an emulated (virtual) sink from the connector's forced EDID
 * blob. If no EDID blob is present the forced-on request cannot be
 * honored and the connector is forced off instead. When the connector
 * is forced on, dc_sink is pointed at the link's local sink if one
 * exists, otherwise at the emulated sink, with a reference taken.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
                                aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        /* EDID size is the base block plus one block per extension. */
        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
        }
}
3803
/*
 * One-time EDID management for forced connectors: seed plausible DP link
 * capabilities so an initial modeset can happen headless, then build the
 * emulated sink from the overridden EDID.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /*
         * In case of headless boot with force on for DP managed connector
         * Those settings have to be != 0 to get initial modeset
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }

        aconnector->base.override_edid = true;
        create_eml_sink(aconnector);
}
3821
3822 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3823                                    struct drm_display_mode *mode)
3824 {
3825         int result = MODE_ERROR;
3826         struct dc_sink *dc_sink;
3827         struct amdgpu_device *adev = connector->dev->dev_private;
3828         /* TODO: Unhardcode stream count */
3829         struct dc_stream_state *stream;
3830         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3831         enum dc_status dc_result = DC_OK;
3832
3833         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3834                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3835                 return result;
3836
3837         /*
3838          * Only run this the first time mode_valid is called to initilialize
3839          * EDID mgmt
3840          */
3841         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3842                 !aconnector->dc_em_sink)
3843                 handle_edid_mgmt(aconnector);
3844
3845         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3846
3847         if (dc_sink == NULL) {
3848                 DRM_ERROR("dc_sink is NULL!\n");
3849                 goto fail;
3850         }
3851
3852         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3853         if (stream == NULL) {
3854                 DRM_ERROR("Failed to create stream for sink!\n");
3855                 goto fail;
3856         }
3857
3858         dc_result = dc_validate_stream(adev->dm.dc, stream);
3859
3860         if (dc_result == DC_OK)
3861                 result = MODE_OK;
3862         else
3863                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3864                               mode->vdisplay,
3865                               mode->hdisplay,
3866                               mode->clock,
3867                               dc_result);
3868
3869         dc_stream_release(stream);
3870
3871 fail:
3872         /* TODO: error handling*/
3873         return result;
3874 }
3875
/*
 * Pack the connector's HDR output metadata into a DC info packet.
 *
 * Builds a Dynamic Range and Mastering (DRM, type 0x87) infoframe and
 * wraps it in the header layout the sink type expects: a plain HDMI
 * infoframe header, or a DP/eDP SDP header.
 *
 * Returns 0 on success (out->valid set iff metadata was present),
 * negative errno on packing failure or unsupported connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
                                struct dc_info_packet *out)
{
        struct hdmi_drm_infoframe frame;
        unsigned char buf[30]; /* 26 + 4 */
        ssize_t len;
        int ret, i;

        memset(out, 0, sizeof(*out));

        /* No metadata attached: leave the packet invalid, not an error. */
        if (!state->hdr_output_metadata)
                return 0;

        ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
        if (ret)
                return ret;

        len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
        if (len < 0)
                return (int)len;

        /* Static metadata is a fixed 26 bytes + 4 byte header. */
        if (len != 30)
                return -EINVAL;

        /* Prepare the infopacket for DC. */
        switch (state->connector->connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                out->hb0 = 0x87; /* type */
                out->hb1 = 0x01; /* version */
                out->hb2 = 0x1A; /* length */
                out->sb[0] = buf[3]; /* checksum */
                i = 1; /* payload starts after the checksum byte */
                break;

        case DRM_MODE_CONNECTOR_DisplayPort:
        case DRM_MODE_CONNECTOR_eDP:
                out->hb0 = 0x00; /* sdp id, zero */
                out->hb1 = 0x87; /* type */
                out->hb2 = 0x1D; /* payload len - 1 */
                out->hb3 = (0x13 << 2); /* sdp version */
                out->sb[0] = 0x01; /* version */
                out->sb[1] = 0x1A; /* length */
                i = 2; /* payload starts after version + length */
                break;

        default:
                return -EINVAL;
        }

        /* Copy the 26 metadata bytes, skipping the 4-byte packed header. */
        memcpy(&out->sb[i], &buf[4], 26);
        out->valid = true;

        print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
                       sizeof(out->sb), false);

        return 0;
}
3934
3935 static bool
3936 is_hdr_metadata_different(const struct drm_connector_state *old_state,
3937                           const struct drm_connector_state *new_state)
3938 {
3939         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
3940         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
3941
3942         if (old_blob != new_blob) {
3943                 if (old_blob && new_blob &&
3944                     old_blob->length == new_blob->length)
3945                         return memcmp(old_blob->data, new_blob->data,
3946                                       old_blob->length);
3947
3948                 return true;
3949         }
3950
3951         return false;
3952 }
3953
/*
 * Connector atomic check: when the HDR static metadata changed, build a
 * trial info packet to validate it and pull the CRTC state into the
 * atomic state so a modeset can be forced on HDR enter/exit.
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                                 struct drm_atomic_state *state)
{
        struct drm_connector_state *new_con_state =
                drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
        struct drm_crtc_state *new_crtc_state;
        int ret;

        /* Connector is being disabled or was never enabled. */
        if (!crtc)
                return 0;

        if (is_hdr_metadata_different(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;

                /* Validate the new metadata by packing it; result discarded. */
                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
                if (ret)
                        return ret;

                new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(new_crtc_state))
                        return PTR_ERR(new_crtc_state);

                /*
                 * DC considers the stream backends changed if the
                 * static metadata changes. Forcing the modeset also
                 * gives a simple way for userspace to switch from
                 * 8bpc to 10bpc when setting the metadata to enter
                 * or exit HDR.
                 *
                 * Changing the static metadata after it's been
                 * set is permissible, however. So only force a
                 * modeset if we're entering or exiting HDR.
                 */
                new_crtc_state->mode_changed =
                        !old_con_state->hdr_output_metadata ||
                        !new_con_state->hdr_output_metadata;
        }

        return 0;
}
3998
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If hotplugging a second, bigger display in FB console mode, the
         * bigger resolution modes will be filtered by drm_mode_validate_size()
         * and will be missing after the user starts lightdm. So we need to
         * renew the modes list in the get_modes callback, not just return
         * the modes count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_check = amdgpu_dm_connector_atomic_check,
};
4011
/* Intentional no-op: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
4015
4016 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4017 {
4018         struct drm_device *dev = new_crtc_state->crtc->dev;
4019         struct drm_plane *plane;
4020
4021         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4022                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4023                         return true;
4024         }
4025
4026         return false;
4027 }
4028
4029 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4030 {
4031         struct drm_atomic_state *state = new_crtc_state->state;
4032         struct drm_plane *plane;
4033         int num_active = 0;
4034
4035         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4036                 struct drm_plane_state *new_plane_state;
4037
4038                 /* Cursor planes are "fake". */
4039                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4040                         continue;
4041
4042                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4043
4044                 if (!new_plane_state) {
4045                         /*
4046                          * The plane is enable on the CRTC and hasn't changed
4047                          * state. This means that it previously passed
4048                          * validation and is therefore enabled.
4049                          */
4050                         num_active += 1;
4051                         continue;
4052                 }
4053
4054                 /* We need a framebuffer to be considered enabled. */
4055                 num_active += (new_plane_state->fb != NULL);
4056         }
4057
4058         return num_active;
4059 }
4060
4061 /*
4062  * Sets whether interrupts should be enabled on a specific CRTC.
4063  * We require that the stream be enabled and that there exist active
4064  * DC planes on the stream.
4065  */
4066 static void
4067 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4068                                struct drm_crtc_state *new_crtc_state)
4069 {
4070         struct dm_crtc_state *dm_new_crtc_state =
4071                 to_dm_crtc_state(new_crtc_state);
4072
4073         dm_new_crtc_state->active_planes = 0;
4074         dm_new_crtc_state->interrupts_enabled = false;
4075
4076         if (!dm_new_crtc_state->stream)
4077                 return;
4078
4079         dm_new_crtc_state->active_planes =
4080                 count_crtc_active_planes(new_crtc_state);
4081
4082         dm_new_crtc_state->interrupts_enabled =
4083                 dm_new_crtc_state->active_planes > 0;
4084 }
4085
/*
 * CRTC atomic check: refresh the interrupt bookkeeping, then validate the
 * DC stream (if any) against the hardware. Rejects a cursor-only
 * configuration where no hardware plane is active.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                                       struct drm_crtc_state *state)
{
        struct amdgpu_device *adev = crtc->dev->dev_private;
        struct dc *dc = adev->dm.dc;
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
        int ret = -EINVAL;

        /*
         * Update interrupt state for the CRTC. This needs to happen whenever
         * the CRTC has changed or whenever any of its planes have changed.
         * Atomic check satisfies both of these requirements since the CRTC
         * is added to the state by DRM during drm_atomic_helper_check_planes.
         */
        dm_update_crtc_interrupt_state(crtc, state);

        /* A required modeset with no stream is a driver logic error. */
        if (unlikely(!dm_crtc_state->stream &&
                     modeset_required(state, NULL, dm_crtc_state->stream))) {
                WARN_ON(1);
                return ret;
        }

        /* In some use cases, like reset, no stream is attached */
        if (!dm_crtc_state->stream)
                return 0;

        /*
         * We want at least one hardware plane enabled to use
         * the stream with a cursor enabled.
         */
        if (state->enable && state->active &&
            does_crtc_have_active_cursor(state) &&
            dm_crtc_state->active_planes == 0)
                return -EINVAL;

        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;

        return ret;
}
4126
/* Accept every mode unchanged; DC performs its own mode validation. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        return true;
}
4133
/* CRTC helper vtable wired into DRM's atomic helpers. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup
};
4139
/* Intentional no-op: encoder disable is handled through the DC link layer. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
4144
/* No encoder-level constraints; always accept the configuration. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        return 0;
}
4151
/* Encoder helper vtable wired into DRM's atomic helpers. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};
4156
4157 static void dm_drm_plane_reset(struct drm_plane *plane)
4158 {
4159         struct dm_plane_state *amdgpu_state = NULL;
4160
4161         if (plane->state)
4162                 plane->funcs->atomic_destroy_state(plane, plane->state);
4163
4164         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4165         WARN_ON(amdgpu_state == NULL);
4166
4167         if (amdgpu_state)
4168                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4169 }
4170
4171 static struct drm_plane_state *
4172 dm_drm_plane_duplicate_state(struct drm_plane *plane)
4173 {
4174         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4175
4176         old_dm_plane_state = to_dm_plane_state(plane->state);
4177         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4178         if (!dm_plane_state)
4179                 return NULL;
4180
4181         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4182
4183         if (old_dm_plane_state->dc_state) {
4184                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4185                 dc_plane_state_retain(dm_plane_state->dc_state);
4186         }
4187
4188         return &dm_plane_state->base;
4189 }
4190
4191 void dm_drm_plane_destroy_state(struct drm_plane *plane,
4192                                 struct drm_plane_state *state)
4193 {
4194         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4195
4196         if (dm_plane_state->dc_state)
4197                 dc_plane_state_release(dm_plane_state->dc_state);
4198
4199         drm_atomic_helper_plane_destroy_state(plane, state);
4200 }
4201
/* Plane funcs: standard atomic helpers plus DM-specific state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
};
4210
/*
 * Prepare the framebuffer for scanout: reserve and pin the backing BO,
 * bind it into GART, record its GPU address, and (when the DC plane state
 * changed) refresh the DC buffer attributes.
 *
 * Returns 0 on success or a negative errno; on failure the BO is left
 * unpinned and unreserved.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        uint64_t tiling_flags;
        uint32_t domain;
        int r;

        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        /* Disabling the plane: nothing to pin. */
        if (!new_state->fb) {
                DRM_DEBUG_DRIVER("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
        /* The BO must stay reserved across pin + GART bind + tiling query. */
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;

        /* Cursors must live in VRAM; other planes may use any supported domain. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                /* -ERESTARTSYS is a normal signal interruption, not an error. */
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                amdgpu_bo_unreserve(rbo);
                return r;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
                amdgpu_bo_unreserve(rbo);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }

        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

        amdgpu_bo_unreserve(rbo);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        /* Held until cleanup_fb; balanced by amdgpu_bo_unref() there. */
        amdgpu_bo_ref(rbo);

        /* Only refresh DC buffer attributes when the DC state was replaced. */
        if (dm_plane_state_new->dc_state &&
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
                        &plane_state->address);
        }

        return 0;
}
4281
4282 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4283                                        struct drm_plane_state *old_state)
4284 {
4285         struct amdgpu_bo *rbo;
4286         int r;
4287
4288         if (!old_state->fb)
4289                 return;
4290
4291         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4292         r = amdgpu_bo_reserve(rbo, false);
4293         if (unlikely(r)) {
4294                 DRM_ERROR("failed to reserve rbo before unpin\n");
4295                 return;
4296         }
4297
4298         amdgpu_bo_unpin(rbo);
4299         amdgpu_bo_unreserve(rbo);
4300         amdgpu_bo_unref(&rbo);
4301 }
4302
/*
 * Plane atomic check: validate the scaling configuration and the DC plane
 * state against the hardware. Planes without a DC state (e.g. disabled)
 * pass trivially.
 */
static int dm_plane_atomic_check(struct drm_plane *plane,
                                 struct drm_plane_state *state)
{
        struct amdgpu_device *adev = plane->dev->dev_private;
        struct dc *dc = adev->dm.dc;
        struct dm_plane_state *dm_plane_state;
        struct dc_scaling_info scaling_info;
        int ret;

        dm_plane_state = to_dm_plane_state(state);

        if (!dm_plane_state->dc_state)
                return 0;

        /* Computed only to validate src/dst rects; the result is discarded. */
        ret = fill_dc_scaling_info(state, &scaling_info);
        if (ret)
                return ret;

        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
                return 0;

        return -EINVAL;
}
4326
4327 static int dm_plane_atomic_async_check(struct drm_plane *plane,
4328                                        struct drm_plane_state *new_plane_state)
4329 {
4330         struct drm_plane_state *old_plane_state =
4331                 drm_atomic_get_old_plane_state(new_plane_state->state, plane);
4332
4333         /* Only support async updates on cursor planes. */
4334         if (plane->type != DRM_PLANE_TYPE_CURSOR)
4335                 return -EINVAL;
4336
4337         /*
4338          * DRM calls prepare_fb and cleanup_fb on new_plane_state for
4339          * async commits so don't allow fb changes.
4340          */
4341         if (old_plane_state->fb != new_plane_state->fb)
4342                 return -EINVAL;
4343
4344         return 0;
4345 }
4346
/*
 * Async plane update: copy the new position/size into the committed plane
 * state and push the cursor update to hardware immediately, bypassing a
 * full atomic commit.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
                                         struct drm_plane_state *new_state)
{
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(new_state->state, plane);

        /* FB should be unchanged per async_check, but sync it defensively. */
        if (plane->state->fb != new_state->fb)
                drm_atomic_set_fb_for_plane(plane->state, new_state->fb);

        plane->state->src_x = new_state->src_x;
        plane->state->src_y = new_state->src_y;
        plane->state->src_w = new_state->src_w;
        plane->state->src_h = new_state->src_h;
        plane->state->crtc_x = new_state->crtc_x;
        plane->state->crtc_y = new_state->crtc_y;
        plane->state->crtc_w = new_state->crtc_w;
        plane->state->crtc_h = new_state->crtc_h;

        handle_cursor_update(plane, old_state);
}
4367
/* Plane helper vtable: FB pin/unpin plus sync and async validation paths. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
        .atomic_async_check = dm_plane_atomic_async_check,
        .atomic_async_update = dm_plane_atomic_async_update
};
4375
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
/* Formats offered on primary planes. */
static const uint32_t rgb_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565,
};
4394
/* Formats offered on overlay planes (8bpc RGB only). */
static const uint32_t overlay_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565
};
4403
/* Cursor planes support ARGB8888 only. */
static const u32 cursor_formats[] = {
        DRM_FORMAT_ARGB8888
};
4407
4408 static int get_plane_formats(const struct drm_plane *plane,
4409                              const struct dc_plane_cap *plane_cap,
4410                              uint32_t *formats, int max_formats)
4411 {
4412         int i, num_formats = 0;
4413
4414         /*
4415          * TODO: Query support for each group of formats directly from
4416          * DC plane caps. This will require adding more formats to the
4417          * caps list.
4418          */
4419
4420         switch (plane->type) {
4421         case DRM_PLANE_TYPE_PRIMARY:
4422                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4423                         if (num_formats >= max_formats)
4424                                 break;
4425
4426                         formats[num_formats++] = rgb_formats[i];
4427                 }
4428
4429                 if (plane_cap && plane_cap->pixel_format_support.nv12)
4430                         formats[num_formats++] = DRM_FORMAT_NV12;
4431                 break;
4432
4433         case DRM_PLANE_TYPE_OVERLAY:
4434                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4435                         if (num_formats >= max_formats)
4436                                 break;
4437
4438                         formats[num_formats++] = overlay_formats[i];
4439                 }
4440                 break;
4441
4442         case DRM_PLANE_TYPE_CURSOR:
4443                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4444                         if (num_formats >= max_formats)
4445                                 break;
4446
4447                         formats[num_formats++] = cursor_formats[i];
4448                 }
4449                 break;
4450         }
4451
4452         return num_formats;
4453 }
4454
/*
 * Register a DRM plane with the format list derived from @plane_cap,
 * attach the DM plane helpers, and create alpha/blend (overlay) or
 * YUV color (primary NV12) properties as capabilities allow.
 *
 * Returns 0 on success or the error from drm_universal_plane_init().
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap)
{
        uint32_t formats[32];
        int num_formats;
        int res = -EPERM;

        num_formats = get_plane_formats(plane, plane_cap, formats,
                                        ARRAY_SIZE(formats));

        res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
                                       &dm_plane_funcs, formats, num_formats,
                                       NULL, plane->type, NULL);
        if (res)
                return res;

        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
            plane_cap && plane_cap->per_pixel_alpha) {
                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
                                          BIT(DRM_MODE_BLEND_PREMULTI);

                drm_plane_create_alpha_property(plane);
                drm_plane_create_blend_mode_property(plane, blend_caps);
        }

        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
            plane_cap && plane_cap->pixel_format_support.nv12) {
                /* This only affects YUV formats. */
                drm_plane_create_color_properties(
                        plane,
                        BIT(DRM_COLOR_YCBCR_BT601) |
                        BIT(DRM_COLOR_YCBCR_BT709),
                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                        DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
        }

        drm_plane_helper_add(plane, &dm_plane_helper_funcs);

        /* Create (reset) the plane state */
        if (plane->funcs->reset)
                plane->funcs->reset(plane);

        return 0;
}
4502
4503 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4504                                struct drm_plane *plane,
4505                                uint32_t crtc_index)
4506 {
4507         struct amdgpu_crtc *acrtc = NULL;
4508         struct drm_plane *cursor_plane;
4509
4510         int res = -ENOMEM;
4511
4512         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4513         if (!cursor_plane)
4514                 goto fail;
4515
4516         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
4517         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
4518
4519         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4520         if (!acrtc)
4521                 goto fail;
4522
4523         res = drm_crtc_init_with_planes(
4524                         dm->ddev,
4525                         &acrtc->base,
4526                         plane,
4527                         cursor_plane,
4528                         &amdgpu_dm_crtc_funcs, NULL);
4529
4530         if (res)
4531                 goto fail;
4532
4533         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4534
4535         /* Create (reset) the plane state */
4536         if (acrtc->base.funcs->reset)
4537                 acrtc->base.funcs->reset(&acrtc->base);
4538
4539         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4540         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4541
4542         acrtc->crtc_id = crtc_index;
4543         acrtc->base.enabled = false;
4544         acrtc->otg_inst = -1;
4545
4546         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4547         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4548                                    true, MAX_COLOR_LUT_ENTRIES);
4549         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4550
4551         return 0;
4552
4553 fail:
4554         kfree(acrtc);
4555         kfree(cursor_plane);
4556         return res;
4557 }
4558
4559
4560 static int to_drm_connector_type(enum signal_type st)
4561 {
4562         switch (st) {
4563         case SIGNAL_TYPE_HDMI_TYPE_A:
4564                 return DRM_MODE_CONNECTOR_HDMIA;
4565         case SIGNAL_TYPE_EDP:
4566                 return DRM_MODE_CONNECTOR_eDP;
4567         case SIGNAL_TYPE_LVDS:
4568                 return DRM_MODE_CONNECTOR_LVDS;
4569         case SIGNAL_TYPE_RGB:
4570                 return DRM_MODE_CONNECTOR_VGA;
4571         case SIGNAL_TYPE_DISPLAY_PORT:
4572         case SIGNAL_TYPE_DISPLAY_PORT_MST:
4573                 return DRM_MODE_CONNECTOR_DisplayPort;
4574         case SIGNAL_TYPE_DVI_DUAL_LINK:
4575         case SIGNAL_TYPE_DVI_SINGLE_LINK:
4576                 return DRM_MODE_CONNECTOR_DVID;
4577         case SIGNAL_TYPE_VIRTUAL:
4578                 return DRM_MODE_CONNECTOR_VIRTUAL;
4579
4580         default:
4581                 return DRM_MODE_CONNECTOR_Unknown;
4582         }
4583 }
4584
/* Return the first (and only) encoder bound to this connector, or NULL. */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
        return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
4589
/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode (used later to synthesize common scaled modes). Clears the cached
 * mode (clock = 0) first.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        encoder = amdgpu_dm_connector_to_encoder(connector);

        if (encoder == NULL)
                return;

        amdgpu_encoder = to_amdgpu_encoder(encoder);

        amdgpu_encoder->native_mode.clock = 0;

        if (!list_empty(&connector->probed_modes)) {
                struct drm_display_mode *preferred_mode = NULL;

                list_for_each_entry(preferred_mode,
                                    &connector->probed_modes,
                                    head) {
                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
                                amdgpu_encoder->native_mode = *preferred_mode;

                        /*
                         * NOTE(review): this break is unconditional, so only
                         * the FIRST probed mode is ever inspected — a
                         * preferred mode further down the list is missed.
                         * Looks suspicious; confirm whether the break was
                         * meant to be inside the if above.
                         */
                        break;
                }

        }
}
4618
4619 static struct drm_display_mode *
4620 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4621                              char *name,
4622                              int hdisplay, int vdisplay)
4623 {
4624         struct drm_device *dev = encoder->dev;
4625         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4626         struct drm_display_mode *mode = NULL;
4627         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4628
4629         mode = drm_mode_duplicate(dev, native_mode);
4630
4631         if (mode == NULL)
4632                 return NULL;
4633
4634         mode->hdisplay = hdisplay;
4635         mode->vdisplay = vdisplay;
4636         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4637         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4638
4639         return mode;
4640
4641 }
4642
4643 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4644                                                  struct drm_connector *connector)
4645 {
4646         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4647         struct drm_display_mode *mode = NULL;
4648         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4649         struct amdgpu_dm_connector *amdgpu_dm_connector =
4650                                 to_amdgpu_dm_connector(connector);
4651         int i;
4652         int n;
4653         struct mode_size {
4654                 char name[DRM_DISPLAY_MODE_LEN];
4655                 int w;
4656                 int h;
4657         } common_modes[] = {
4658                 {  "640x480",  640,  480},
4659                 {  "800x600",  800,  600},
4660                 { "1024x768", 1024,  768},
4661                 { "1280x720", 1280,  720},
4662                 { "1280x800", 1280,  800},
4663                 {"1280x1024", 1280, 1024},
4664                 { "1440x900", 1440,  900},
4665                 {"1680x1050", 1680, 1050},
4666                 {"1600x1200", 1600, 1200},
4667                 {"1920x1080", 1920, 1080},
4668                 {"1920x1200", 1920, 1200}
4669         };
4670
4671         n = ARRAY_SIZE(common_modes);
4672
4673         for (i = 0; i < n; i++) {
4674                 struct drm_display_mode *curmode = NULL;
4675                 bool mode_existed = false;
4676
4677                 if (common_modes[i].w > native_mode->hdisplay ||
4678                     common_modes[i].h > native_mode->vdisplay ||
4679                    (common_modes[i].w == native_mode->hdisplay &&
4680                     common_modes[i].h == native_mode->vdisplay))
4681                         continue;
4682
4683                 list_for_each_entry(curmode, &connector->probed_modes, head) {
4684                         if (common_modes[i].w == curmode->hdisplay &&
4685                             common_modes[i].h == curmode->vdisplay) {
4686                                 mode_existed = true;
4687                                 break;
4688                         }
4689                 }
4690
4691                 if (mode_existed)
4692                         continue;
4693
4694                 mode = amdgpu_dm_create_common_mode(encoder,
4695                                 common_modes[i].name, common_modes[i].w,
4696                                 common_modes[i].h);
4697                 drm_mode_probed_add(connector, mode);
4698                 amdgpu_dm_connector->num_modes++;
4699         }
4700 }
4701
4702 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4703                                               struct edid *edid)
4704 {
4705         struct amdgpu_dm_connector *amdgpu_dm_connector =
4706                         to_amdgpu_dm_connector(connector);
4707
4708         if (edid) {
4709                 /* empty probed_modes */
4710                 INIT_LIST_HEAD(&connector->probed_modes);
4711                 amdgpu_dm_connector->num_modes =
4712                                 drm_add_edid_modes(connector, edid);
4713
4714                 amdgpu_dm_get_native_mode(connector);
4715         } else {
4716                 amdgpu_dm_connector->num_modes = 0;
4717         }
4718 }
4719
/*
 * get_modes callback: build the mode list from the cached EDID (plus
 * common scaled modes), falling back to 640x480 when no valid EDID
 * exists. Returns the number of modes.
 */
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct drm_encoder *encoder;
        struct edid *edid = amdgpu_dm_connector->edid;

        encoder = amdgpu_dm_connector_to_encoder(connector);

        if (!edid || !drm_edid_is_valid(edid)) {
                /* No usable EDID: offer only the VGA fallback mode. */
                amdgpu_dm_connector->num_modes =
                                drm_add_modes_noedid(connector, 640, 480);
        } else {
                amdgpu_dm_connector_ddc_get_modes(connector, edid);
                amdgpu_dm_connector_add_common_modes(encoder, connector);
        }
        amdgpu_dm_fbc_init(connector);

        return amdgpu_dm_connector->num_modes;
}
4740
/*
 * Common initialization shared by all DM connector types: ties the
 * connector to its dc_link, configures hotplug polling per connector
 * type, and attaches the DRM properties the driver exposes (scaling,
 * underscan, max bpc, ABM, HDR metadata, VRR capability).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.max_bpc_property,
				0);

	/* ABM is only exposed on eDP panels with an initialized DMCU. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	/* HDR metadata and VRR capability apply to HDMI, DP and eDP only. */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		drm_connector_attach_vrr_capable_property(
			&aconnector->base);
	}
}
4814
4815 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4816                               struct i2c_msg *msgs, int num)
4817 {
4818         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4819         struct ddc_service *ddc_service = i2c->ddc_service;
4820         struct i2c_command cmd;
4821         int i;
4822         int result = -EIO;
4823
4824         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4825
4826         if (!cmd.payloads)
4827                 return result;
4828
4829         cmd.number_of_payloads = num;
4830         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4831         cmd.speed = 100;
4832
4833         for (i = 0; i < num; i++) {
4834                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4835                 cmd.payloads[i].address = msgs[i].addr;
4836                 cmd.payloads[i].length = msgs[i].len;
4837                 cmd.payloads[i].data = msgs[i].buf;
4838         }
4839
4840         if (dc_submit_i2c(
4841                         ddc_service->ctx->dc,
4842                         ddc_service->ddc_pin->hw_info.ddc_channel,
4843                         &cmd))
4844                 result = num;
4845
4846         kfree(cmd.payloads);
4847         return result;
4848 }
4849
/* Advertise plain I2C plus SMBus emulation for the DDC-backed adapter. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
4854
/* i2c_algorithm glue that routes transfers through the DC DDC service. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
4859
/*
 * Allocate and initialize an i2c adapter exposing the link's DDC line
 * through amdgpu_dm_i2c_algo. Returns NULL on allocation failure; the
 * caller owns the returned adapter and registers/frees it.
 *
 * NOTE(review): @res is never written here, so callers must not rely
 * on it — consider dropping the parameter.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Route this adapter's transfers to the link's DDC channel.
	 * NOTE(review): assumes ddc_service->ddc_pin is non-NULL — confirm
	 * for links created without a physical DDC pin. */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
4882
4883
4884 /*
4885  * Note: this function assumes that dc_link_detect() was called for the
4886  * dc_link which will be represented by this aconnector.
4887  */
4888 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4889                                     struct amdgpu_dm_connector *aconnector,
4890                                     uint32_t link_index,
4891                                     struct amdgpu_encoder *aencoder)
4892 {
4893         int res = 0;
4894         int connector_type;
4895         struct dc *dc = dm->dc;
4896         struct dc_link *link = dc_get_link_at_index(dc, link_index);
4897         struct amdgpu_i2c_adapter *i2c;
4898
4899         link->priv = aconnector;
4900
4901         DRM_DEBUG_DRIVER("%s()\n", __func__);
4902
4903         i2c = create_i2c(link->ddc, link->link_index, &res);
4904         if (!i2c) {
4905                 DRM_ERROR("Failed to create i2c adapter data\n");
4906                 return -ENOMEM;
4907         }
4908
4909         aconnector->i2c = i2c;
4910         res = i2c_add_adapter(&i2c->base);
4911
4912         if (res) {
4913                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4914                 goto out_free;
4915         }
4916
4917         connector_type = to_drm_connector_type(link->connector_signal);
4918
4919         res = drm_connector_init(
4920                         dm->ddev,
4921                         &aconnector->base,
4922                         &amdgpu_dm_connector_funcs,
4923                         connector_type);
4924
4925         if (res) {
4926                 DRM_ERROR("connector_init failed\n");
4927                 aconnector->connector_id = -1;
4928                 goto out_free;
4929         }
4930
4931         drm_connector_helper_add(
4932                         &aconnector->base,
4933                         &amdgpu_dm_connector_helper_funcs);
4934
4935         if (aconnector->base.funcs->reset)
4936                 aconnector->base.funcs->reset(&aconnector->base);
4937
4938         amdgpu_dm_connector_init_helper(
4939                 dm,
4940                 aconnector,
4941                 connector_type,
4942                 link,
4943                 link_index);
4944
4945         drm_connector_attach_encoder(
4946                 &aconnector->base, &aencoder->base);
4947
4948         drm_connector_register(&aconnector->base);
4949 #if defined(CONFIG_DEBUG_FS)
4950         res = connector_debugfs_init(aconnector);
4951         if (res) {
4952                 DRM_ERROR("Failed to create debugfs for connector");
4953                 goto out_free;
4954         }
4955         aconnector->debugfs_dpcd_address = 0;
4956         aconnector->debugfs_dpcd_size = 0;
4957 #endif
4958
4959         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4960                 || connector_type == DRM_MODE_CONNECTOR_eDP)
4961                 amdgpu_dm_initialize_dp_connector(dm, aconnector);
4962
4963 out_free:
4964         if (res) {
4965                 kfree(i2c);
4966                 aconnector->i2c = NULL;
4967         }
4968         return res;
4969 }
4970
4971 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4972 {
4973         switch (adev->mode_info.num_crtc) {
4974         case 1:
4975                 return 0x1;
4976         case 2:
4977                 return 0x3;
4978         case 3:
4979                 return 0x7;
4980         case 4:
4981                 return 0xf;
4982         case 5:
4983                 return 0x1f;
4984         case 6:
4985         default:
4986                 return 0x3f;
4987         }
4988 }
4989
/*
 * Initialize @aencoder as a TMDS encoder drivable by any CRTC and
 * attach the DM encoder helper callbacks. Returns the
 * drm_encoder_init() result; on failure encoder_id is set to -1.
 *
 * NOTE(review): possible_crtcs and the helper funcs are still set up
 * even when drm_encoder_init() failed — confirm callers never use the
 * encoder in that case.
 */
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	/* Any CRTC on the ASIC may drive this encoder. */
	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
5013
5014 static void manage_dm_interrupts(struct amdgpu_device *adev,
5015                                  struct amdgpu_crtc *acrtc,
5016                                  bool enable)
5017 {
5018         /*
5019          * this is not correct translation but will work as soon as VBLANK
5020          * constant is the same as PFLIP
5021          */
5022         int irq_type =
5023                 amdgpu_display_crtc_idx_to_irq_type(
5024                         adev,
5025                         acrtc->crtc_id);
5026
5027         if (enable) {
5028                 drm_crtc_vblank_on(&acrtc->base);
5029                 amdgpu_irq_get(
5030                         adev,
5031                         &adev->pageflip_irq,
5032                         irq_type);
5033         } else {
5034
5035                 amdgpu_irq_put(
5036                         adev,
5037                         &adev->pageflip_irq,
5038                         irq_type);
5039                 drm_crtc_vblank_off(&acrtc->base);
5040         }
5041 }
5042
5043 static bool
5044 is_scaling_state_different(const struct dm_connector_state *dm_state,
5045                            const struct dm_connector_state *old_dm_state)
5046 {
5047         if (dm_state->scaling != old_dm_state->scaling)
5048                 return true;
5049         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5050                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5051                         return true;
5052         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5053                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5054                         return true;
5055         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5056                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5057                 return true;
5058         return false;
5059 }
5060
/*
 * Mark @acrtc as no longer driving a stream. @adev and @stream are
 * currently unused; kept for symmetry with the stream setup path.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
5070
5071 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5072                                struct dc_cursor_position *position)
5073 {
5074         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5075         int x, y;
5076         int xorigin = 0, yorigin = 0;
5077
5078         if (!crtc || !plane->state->fb) {
5079                 position->enable = false;
5080                 position->x = 0;
5081                 position->y = 0;
5082                 return 0;
5083         }
5084
5085         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5086             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5087                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5088                           __func__,
5089                           plane->state->crtc_w,
5090                           plane->state->crtc_h);
5091                 return -EINVAL;
5092         }
5093
5094         x = plane->state->crtc_x;
5095         y = plane->state->crtc_y;
5096
5097         if (crtc->primary->state) {
5098                 /* avivo cursor are offset into the total surface */
5099                 x += crtc->primary->state->src_x >> 16;
5100                 y += crtc->primary->state->src_y >> 16;
5101         }
5102
5103         if (x < 0) {
5104                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5105                 x = 0;
5106         }
5107         if (y < 0) {
5108                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5109                 y = 0;
5110         }
5111         position->enable = true;
5112         position->x = x;
5113         position->y = y;
5114         position->x_hotspot = xorigin;
5115         position->y_hotspot = yorigin;
5116
5117         return 0;
5118 }
5119
/*
 * Program or disable the hardware cursor for the CRTC that @plane is
 * (or was) attached to, based on the plane's new state.
 *
 * NOTE(review): when the new state has no fb, @crtc comes from
 * @old_plane_state and may be NULL; amdgpu_crtc would then be a bogus
 * pointer when dereferenced in the debug print below — confirm this
 * path cannot be reached with a NULL crtc.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* Fall back to the old CRTC when the cursor is being disabled. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do if the cursor has no fb in either state. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	/* Describe the cursor surface (address, size, format) to DC. */
	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		/* dc_lock serializes against other DC programming. */
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
5183
/*
 * Hand the pending pageflip event over to the pflip interrupt handler.
 * The caller must hold the DRM event_lock (asserted below), and only
 * one flip may be outstanding per CRTC (hence the WARN_ON).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	/* Stash the event so the pflip irq handler can deliver it. */
	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
5201
/*
 * Recompute the VRR parameters and infopacket for @new_stream after a
 * flip and record in @new_crtc_state whether the timing adjustment or
 * the infopacket changed, so the commit path knows what to reprogram.
 * Runs under the DRM event_lock to serialize with the vblank/pflip
 * interrupt handlers that also touch the VRR state.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* On pre-AI (Vega) families with VRR active, also run the
		 * v_update handling here. */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* |= accumulates "changed" across multiple calls in one commit. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
5275
/*
 * Compute the VRR parameters for @new_crtc_state's stream before the
 * commit is programmed, from its freesync config and whether VRR is
 * supported/enabled, and note whether the timing adjustment changed.
 * Runs under the DRM event_lock, like update_freesync_state_on_stream().
 */
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	/* VRR needs hardware support and a valid refresh range; otherwise
	 * mark it unsupported for this stream. */
	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* |= accumulates "changed" across multiple calls in one commit. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
5321
5322 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
5323                                             struct dm_crtc_state *new_state)
5324 {
5325         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5326         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5327
5328         if (!old_vrr_active && new_vrr_active) {
5329                 /* Transition VRR inactive -> active:
5330                  * While VRR is active, we must not disable vblank irq, as a
5331                  * reenable after disable would compute bogus vblank/pflip
5332                  * timestamps if it likely happened inside display front-porch.
5333                  *
5334                  * We also need vupdate irq for the actual core vblank handling
5335                  * at end of vblank.
5336                  */
5337                 dm_set_vupdate_irq(new_state->base.crtc, true);
5338                 drm_crtc_vblank_get(new_state->base.crtc);
5339                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5340                                  __func__, new_state->base.crtc->base.id);
5341         } else if (old_vrr_active && !new_vrr_active) {
5342                 /* Transition VRR active -> inactive:
5343                  * Allow vblank irq disable again for fixed refresh rate.
5344                  */
5345                 dm_set_vupdate_irq(new_state->base.crtc, false);
5346                 drm_crtc_vblank_put(new_state->base.crtc);
5347                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5348                                  __func__, new_state->base.crtc->base.id);
5349         }
5350 }
5351
5352 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
5353 {
5354         struct drm_plane *plane;
5355         struct drm_plane_state *old_plane_state, *new_plane_state;
5356         int i;
5357
5358         /*
5359          * TODO: Make this per-stream so we don't issue redundant updates for
5360          * commits with multiple streams.
5361          */
5362         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
5363                                        new_plane_state, i)
5364                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5365                         handle_cursor_update(plane, old_plane_state);
5366 }
5367
5368 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5369                                     struct dc_state *dc_state,
5370                                     struct drm_device *dev,
5371                                     struct amdgpu_display_manager *dm,
5372                                     struct drm_crtc *pcrtc,
5373                                     bool wait_for_vblank)
5374 {
5375         uint32_t i;
5376         uint64_t timestamp_ns;
5377         struct drm_plane *plane;
5378         struct drm_plane_state *old_plane_state, *new_plane_state;
5379         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5380         struct drm_crtc_state *new_pcrtc_state =
5381                         drm_atomic_get_new_crtc_state(state, pcrtc);
5382         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5383         struct dm_crtc_state *dm_old_crtc_state =
5384                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5385         int planes_count = 0, vpos, hpos;
5386         long r;
5387         unsigned long flags;
5388         struct amdgpu_bo *abo;
5389         uint64_t tiling_flags;
5390         uint32_t target_vblank, last_flip_vblank;
5391         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5392         bool pflip_present = false;
5393         struct {
5394                 struct dc_surface_update surface_updates[MAX_SURFACES];
5395                 struct dc_plane_info plane_infos[MAX_SURFACES];
5396                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
5397                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5398                 struct dc_stream_update stream_update;
5399         } *bundle;
5400
5401         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5402
5403         if (!bundle) {
5404                 dm_error("Failed to allocate update bundle\n");
5405                 goto cleanup;
5406         }
5407
5408         /*
5409          * Disable the cursor first if we're disabling all the planes.
5410          * It'll remain on the screen after the planes are re-enabled
5411          * if we don't.
5412          */
5413         if (acrtc_state->active_planes == 0)
5414                 amdgpu_dm_commit_cursors(state);
5415
5416         /* update planes when needed */
5417         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5418                 struct drm_crtc *crtc = new_plane_state->crtc;
5419                 struct drm_crtc_state *new_crtc_state;
5420                 struct drm_framebuffer *fb = new_plane_state->fb;
5421                 bool plane_needs_flip;
5422                 struct dc_plane_state *dc_plane;
5423                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5424
5425                 /* Cursor plane is handled after stream updates */
5426                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5427                         continue;
5428
5429                 if (!fb || !crtc || pcrtc != crtc)
5430                         continue;
5431
5432                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5433                 if (!new_crtc_state->active)
5434                         continue;
5435
5436                 dc_plane = dm_new_plane_state->dc_state;
5437
5438                 bundle->surface_updates[planes_count].surface = dc_plane;
5439                 if (new_pcrtc_state->color_mgmt_changed) {
5440                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5441                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5442                 }
5443
5444                 fill_dc_scaling_info(new_plane_state,
5445                                      &bundle->scaling_infos[planes_count]);
5446
5447                 bundle->surface_updates[planes_count].scaling_info =
5448                         &bundle->scaling_infos[planes_count];
5449
5450                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5451
5452                 pflip_present = pflip_present || plane_needs_flip;
5453
5454                 if (!plane_needs_flip) {
5455                         planes_count += 1;
5456                         continue;
5457                 }
5458
5459                 abo = gem_to_amdgpu_bo(fb->obj[0]);
5460
5461                 /*
5462                  * Wait for all fences on this FB. Do limited wait to avoid
5463                  * deadlock during GPU reset when this fence will not signal
5464                  * but we hold reservation lock for the BO.
5465                  */
5466                 r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
5467                                                         false,
5468                                                         msecs_to_jiffies(5000));
5469                 if (unlikely(r <= 0))
5470                         DRM_ERROR("Waiting for fences timed out or interrupted!");
5471
5472                 /*
5473                  * TODO This might fail and hence better not used, wait
5474                  * explicitly on fences instead
5475                  * and in general should be called for
5476                  * blocking commit to as per framework helpers
5477                  */
5478                 r = amdgpu_bo_reserve(abo, true);
5479                 if (unlikely(r != 0))
5480                         DRM_ERROR("failed to reserve buffer before flip\n");
5481
5482                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5483
5484                 amdgpu_bo_unreserve(abo);
5485
5486                 fill_dc_plane_info_and_addr(
5487                         dm->adev, new_plane_state, tiling_flags,
5488                         &bundle->plane_infos[planes_count],
5489                         &bundle->flip_addrs[planes_count].address);
5490
5491                 bundle->surface_updates[planes_count].plane_info =
5492                         &bundle->plane_infos[planes_count];
5493
5494                 bundle->flip_addrs[planes_count].flip_immediate =
5495                                 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
5496
5497                 timestamp_ns = ktime_get_ns();
5498                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5499                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5500                 bundle->surface_updates[planes_count].surface = dc_plane;
5501
5502                 if (!bundle->surface_updates[planes_count].surface) {
5503                         DRM_ERROR("No surface for CRTC: id=%d\n",
5504                                         acrtc_attach->crtc_id);
5505                         continue;
5506                 }
5507
5508                 if (plane == pcrtc->primary)
5509                         update_freesync_state_on_stream(
5510                                 dm,
5511                                 acrtc_state,
5512                                 acrtc_state->stream,
5513                                 dc_plane,
5514                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5515
5516                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5517                                  __func__,
5518                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5519                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5520
5521                 planes_count += 1;
5522
5523         }
5524
5525         if (pflip_present) {
5526                 if (!vrr_active) {
5527                         /* Use old throttling in non-vrr fixed refresh rate mode
5528                          * to keep flip scheduling based on target vblank counts
5529                          * working in a backwards compatible way, e.g., for
5530                          * clients using the GLX_OML_sync_control extension or
5531                          * DRI3/Present extension with defined target_msc.
5532                          */
5533                         last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
5534                 }
5535                 else {
5536                         /* For variable refresh rate mode only:
5537                          * Get vblank of last completed flip to avoid > 1 vrr
5538                          * flips per video frame by use of throttling, but allow
5539                          * flip programming anywhere in the possibly large
5540                          * variable vrr vblank interval for fine-grained flip
5541                          * timing control and more opportunity to avoid stutter
5542                          * on late submission of flips.
5543                          */
5544                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5545                         last_flip_vblank = acrtc_attach->last_flip_vblank;
5546                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5547                 }
5548
5549                 target_vblank = last_flip_vblank + wait_for_vblank;
5550
5551                 /*
5552                  * Wait until we're out of the vertical blank period before the one
5553                  * targeted by the flip
5554                  */
5555                 while ((acrtc_attach->enabled &&
5556                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5557                                                             0, &vpos, &hpos, NULL,
5558                                                             NULL, &pcrtc->hwmode)
5559                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5560                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5561                         (int)(target_vblank -
5562                           amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5563                         usleep_range(1000, 1100);
5564                 }
5565
5566                 if (acrtc_attach->base.state->event) {
5567                         drm_crtc_vblank_get(pcrtc);
5568
5569                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5570
5571                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5572                         prepare_flip_isr(acrtc_attach);
5573
5574                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5575                 }
5576
5577                 if (acrtc_state->stream) {
5578
5579                         if (acrtc_state->freesync_timing_changed)
5580                                 bundle->stream_update.adjust =
5581                                         &acrtc_state->stream->adjust;
5582
5583                         if (acrtc_state->freesync_vrr_info_changed)
5584                                 bundle->stream_update.vrr_infopacket =
5585                                         &acrtc_state->stream->vrr_infopacket;
5586                 }
5587         }
5588
5589         /* Update the planes if changed or disable if we don't have any. */
5590         if (planes_count || acrtc_state->active_planes == 0) {
5591                 if (new_pcrtc_state->mode_changed) {
5592                         bundle->stream_update.src = acrtc_state->stream->src;
5593                         bundle->stream_update.dst = acrtc_state->stream->dst;
5594                 }
5595
5596                 if (new_pcrtc_state->color_mgmt_changed)
5597                         bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
5598
5599                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
5600                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
5601                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
5602
5603                 mutex_lock(&dm->dc_lock);
5604                 dc_commit_updates_for_stream(dm->dc,
5605                                                      bundle->surface_updates,
5606                                                      planes_count,
5607                                                      acrtc_state->stream,
5608                                                      &bundle->stream_update,
5609                                                      dc_state);
5610                 mutex_unlock(&dm->dc_lock);
5611         }
5612
5613         /*
5614          * Update cursor state *after* programming all the planes.
5615          * This avoids redundant programming in the case where we're going
5616          * to be disabling a single plane - those pipes are being disabled.
5617          */
5618         if (acrtc_state->active_planes)
5619                 amdgpu_dm_commit_cursors(state);
5620
5621 cleanup:
5622         kfree(bundle);
5623 }
5624
5625 /*
5626  * Enable interrupts on CRTCs that are newly active, undergone
5627  * a modeset, or have active planes again.
5628  *
5629  * Done in two passes, based on the for_modeset flag:
5630  * Pass 1: For CRTCs going through modeset
5631  * Pass 2: For CRTCs going from 0 to n active planes
5632  *
5633  * Interrupts can only be enabled after the planes are programmed,
5634  * so this requires a two-pass approach since we don't want to
5635  * just defer the interrupts until after commit planes every time.
5636  */
5637 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
5638                                              struct drm_atomic_state *state,
5639                                              bool for_modeset)
5640 {
5641         struct amdgpu_device *adev = dev->dev_private;
5642         struct drm_crtc *crtc;
5643         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5644         int i;
5645
5646         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5647                                       new_crtc_state, i) {
5648                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5649                 struct dm_crtc_state *dm_new_crtc_state =
5650                         to_dm_crtc_state(new_crtc_state);
5651                 struct dm_crtc_state *dm_old_crtc_state =
5652                         to_dm_crtc_state(old_crtc_state);
5653                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
5654                 bool run_pass;
5655
5656                 run_pass = (for_modeset && modeset) ||
5657                            (!for_modeset && !modeset &&
5658                             !dm_old_crtc_state->interrupts_enabled);
5659
5660                 if (!run_pass)
5661                         continue;
5662
5663                 if (!dm_new_crtc_state->interrupts_enabled)
5664                         continue;
5665
5666                 manage_dm_interrupts(adev, acrtc, true);
5667
5668 #ifdef CONFIG_DEBUG_FS
5669                 /* The stream has changed so CRC capture needs to re-enabled. */
5670                 if (dm_new_crtc_state->crc_enabled) {
5671                         dm_new_crtc_state->crc_enabled = false;
5672                         amdgpu_dm_crtc_set_crc_source(crtc, "auto");
5673                 }
5674 #endif
5675         }
5676 }
5677
5678 /*
5679  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
5680  * @crtc_state: the DRM CRTC state
5681  * @stream_state: the DC stream state.
5682  *
5683  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
5684  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
5685  */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        /* DRM's modeset decision is the single source of truth for DC's flag. */
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
5691
/**
 * amdgpu_dm_atomic_commit() - DM's drm_mode_config_funcs.atomic_commit hook.
 * @dev: DRM device
 * @state: the atomic state to commit
 * @nonblock: whether the commit should be performed without blocking
 *
 * Disables pflip/vblank interrupts on CRTCs that are being disabled, losing
 * their stream, or undergoing a modeset, then hands the commit over to the
 * generic DRM atomic helper. Returns the helper's result.
 */
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        /*
         * We evade vblank and pflip interrupts on CRTCs that are undergoing
         * a modeset, being disabled, or have no active planes.
         *
         * It's done in atomic commit rather than commit tail for now since
         * some of these interrupt handlers access the current CRTC state and
         * potentially the stream pointer itself.
         *
         * Since the atomic state is swapped within atomic commit and not
         * within commit tail this would lead to the new state (that hasn't
         * been committed yet) being accessed from within the handlers.
         *
         * TODO: Fix this so we can do this in commit tail and not have to block
         * in atomic check.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                /* Interrupts were on, and must now go off (disable/modeset). */
                if (dm_old_crtc_state->interrupts_enabled &&
                    (!dm_new_crtc_state->interrupts_enabled ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
                        /*
                         * Drop the extra vblank reference added by CRC
                         * capture if applicable.
                         */
                        if (dm_new_crtc_state->crc_enabled)
                                drm_crtc_vblank_put(crtc);

                        /*
                         * Only keep CRC capture enabled if there's
                         * still a stream for the CRTC.
                         */
                        if (!dm_new_crtc_state->stream)
                                dm_new_crtc_state->crc_enabled = false;

                        manage_dm_interrupts(adev, acrtc, false);
                }
        }
        /*
         * Add check here for SoC's that support hardware cursor plane, to
         * unset legacy_cursor_update
         */

        return drm_atomic_helper_commit(dev, state, nonblock);

        /*TODO Handle EINTR, reenable IRQ*/
}
5750
5751 /**
5752  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
5753  * @state: The atomic state to commit
5754  *
5755  * This will tell DC to commit the constructed DC state from atomic_check,
5756  * programming the hardware. Any failures here implies a hardware failure, since
5757  * atomic check should have filtered anything non-kosher.
5758  */
5759 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5760 {
5761         struct drm_device *dev = state->dev;
5762         struct amdgpu_device *adev = dev->dev_private;
5763         struct amdgpu_display_manager *dm = &adev->dm;
5764         struct dm_atomic_state *dm_state;
5765         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
5766         uint32_t i, j;
5767         struct drm_crtc *crtc;
5768         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5769         unsigned long flags;
5770         bool wait_for_vblank = true;
5771         struct drm_connector *connector;
5772         struct drm_connector_state *old_con_state, *new_con_state;
5773         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5774         int crtc_disable_count = 0;
5775
5776         drm_atomic_helper_update_legacy_modeset_state(dev, state);
5777
5778         dm_state = dm_atomic_get_new_state(state);
5779         if (dm_state && dm_state->context) {
5780                 dc_state = dm_state->context;
5781         } else {
5782                 /* No state changes, retain current state. */
5783                 dc_state_temp = dc_create_state(dm->dc);
5784                 ASSERT(dc_state_temp);
5785                 dc_state = dc_state_temp;
5786                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
5787         }
5788
5789         /* update changed items */
5790         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5791                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5792
5793                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5794                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5795
5796                 DRM_DEBUG_DRIVER(
5797                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5798                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5799                         "connectors_changed:%d\n",
5800                         acrtc->crtc_id,
5801                         new_crtc_state->enable,
5802                         new_crtc_state->active,
5803                         new_crtc_state->planes_changed,
5804                         new_crtc_state->mode_changed,
5805                         new_crtc_state->active_changed,
5806                         new_crtc_state->connectors_changed);
5807
5808                 /* Copy all transient state flags into dc state */
5809                 if (dm_new_crtc_state->stream) {
5810                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5811                                                             dm_new_crtc_state->stream);
5812                 }
5813
5814                 /* handles headless hotplug case, updating new_state and
5815                  * aconnector as needed
5816                  */
5817
5818                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5819
5820                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5821
5822                         if (!dm_new_crtc_state->stream) {
5823                                 /*
5824                                  * this could happen because of issues with
5825                                  * userspace notifications delivery.
5826                                  * In this case userspace tries to set mode on
5827                                  * display which is disconnected in fact.
5828                                  * dc_sink is NULL in this case on aconnector.
5829                                  * We expect reset mode will come soon.
5830                                  *
5831                                  * This can also happen when unplug is done
5832                                  * during resume sequence ended
5833                                  *
5834                                  * In this case, we want to pretend we still
5835                                  * have a sink to keep the pipe running so that
5836                                  * hw state is consistent with the sw state
5837                                  */
5838                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5839                                                 __func__, acrtc->base.base.id);
5840                                 continue;
5841                         }
5842
5843                         if (dm_old_crtc_state->stream)
5844                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5845
5846                         pm_runtime_get_noresume(dev->dev);
5847
5848                         acrtc->enabled = true;
5849                         acrtc->hw_mode = new_crtc_state->mode;
5850                         crtc->hwmode = new_crtc_state->mode;
5851                 } else if (modereset_required(new_crtc_state)) {
5852                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5853
5854                         /* i.e. reset mode */
5855                         if (dm_old_crtc_state->stream)
5856                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5857                 }
5858         } /* for_each_crtc_in_state() */
5859
5860         if (dc_state) {
5861                 dm_enable_per_frame_crtc_master_sync(dc_state);
5862                 mutex_lock(&dm->dc_lock);
5863                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5864                 mutex_unlock(&dm->dc_lock);
5865         }
5866
5867         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5868                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5869
5870                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5871
5872                 if (dm_new_crtc_state->stream != NULL) {
5873                         const struct dc_stream_status *status =
5874                                         dc_stream_get_status(dm_new_crtc_state->stream);
5875
5876                         if (!status)
5877                                 status = dc_stream_get_status_from_state(dc_state,
5878                                                                          dm_new_crtc_state->stream);
5879
5880                         if (!status)
5881                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5882                         else
5883                                 acrtc->otg_inst = status->primary_otg_inst;
5884                 }
5885         }
5886
5887         /* Handle connector state changes */
5888         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5889                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5890                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5891                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5892                 struct dc_surface_update dummy_updates[MAX_SURFACES];
5893                 struct dc_stream_update stream_update;
5894                 struct dc_info_packet hdr_packet;
5895                 struct dc_stream_status *status = NULL;
5896                 bool abm_changed, hdr_changed, scaling_changed;
5897
5898                 memset(&dummy_updates, 0, sizeof(dummy_updates));
5899                 memset(&stream_update, 0, sizeof(stream_update));
5900
5901                 if (acrtc) {
5902                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5903                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5904                 }
5905
5906                 /* Skip any modesets/resets */
5907                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5908                         continue;
5909
5910                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5911                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5912
5913                 scaling_changed = is_scaling_state_different(dm_new_con_state,
5914                                                              dm_old_con_state);
5915
5916                 abm_changed = dm_new_crtc_state->abm_level !=
5917                               dm_old_crtc_state->abm_level;
5918
5919                 hdr_changed =
5920                         is_hdr_metadata_different(old_con_state, new_con_state);
5921
5922                 if (!scaling_changed && !abm_changed && !hdr_changed)
5923                         continue;
5924
5925                 if (scaling_changed) {
5926                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5927                                         dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5928
5929                         stream_update.src = dm_new_crtc_state->stream->src;
5930                         stream_update.dst = dm_new_crtc_state->stream->dst;
5931                 }
5932
5933                 if (abm_changed) {
5934                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5935
5936                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
5937                 }
5938
5939                 if (hdr_changed) {
5940                         fill_hdr_info_packet(new_con_state, &hdr_packet);
5941                         stream_update.hdr_static_metadata = &hdr_packet;
5942                 }
5943
5944                 status = dc_stream_get_status(dm_new_crtc_state->stream);
5945                 WARN_ON(!status);
5946                 WARN_ON(!status->plane_count);
5947
5948                 /*
5949                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
5950                  * Here we create an empty update on each plane.
5951                  * To fix this, DC should permit updating only stream properties.
5952                  */
5953                 for (j = 0; j < status->plane_count; j++)
5954                         dummy_updates[j].surface = status->plane_states[0];
5955
5956
5957                 mutex_lock(&dm->dc_lock);
5958                 dc_commit_updates_for_stream(dm->dc,
5959                                                      dummy_updates,
5960                                                      status->plane_count,
5961                                                      dm_new_crtc_state->stream,
5962                                                      &stream_update,
5963                                                      dc_state);
5964                 mutex_unlock(&dm->dc_lock);
5965         }
5966
5967         /* Count number of newly disabled CRTCs for dropping PM refs later. */
5968         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5969                                       new_crtc_state, i) {
5970                 if (old_crtc_state->active && !new_crtc_state->active)
5971                         crtc_disable_count++;
5972
5973                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5974                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5975
5976                 /* Update freesync active state. */
5977                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
5978
5979                 /* Handle vrr on->off / off->on transitions */
5980                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
5981                                                 dm_new_crtc_state);
5982         }
5983
5984         /* Enable interrupts for CRTCs going through a modeset. */
5985         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
5986
5987         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
5988                 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
5989                         wait_for_vblank = false;
5990
5991         /* update planes when needed per crtc*/
5992         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5993                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5994
5995                 if (dm_new_crtc_state->stream)
5996                         amdgpu_dm_commit_planes(state, dc_state, dev,
5997                                                 dm, crtc, wait_for_vblank);
5998         }
5999
6000         /* Enable interrupts for CRTCs going from 0 to n active planes. */
6001         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
6002
6003         /*
6004          * send vblank event on all events not handled in flip and
6005          * mark consumed event for drm_atomic_helper_commit_hw_done
6006          */
6007         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6008         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6009
6010                 if (new_crtc_state->event)
6011                         drm_send_event_locked(dev, &new_crtc_state->event->base);
6012
6013                 new_crtc_state->event = NULL;
6014         }
6015         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6016
6017         /* Signal HW programming completion */
6018         drm_atomic_helper_commit_hw_done(state);
6019
6020         if (wait_for_vblank)
6021                 drm_atomic_helper_wait_for_flip_done(dev, state);
6022
6023         drm_atomic_helper_cleanup_planes(dev, state);
6024
6025         /*
6026          * Finally, drop a runtime PM reference for each newly disabled CRTC,
6027          * so we can put the GPU into runtime suspend if we're not driving any
6028          * displays anymore
6029          */
6030         for (i = 0; i < crtc_disable_count; i++)
6031                 pm_runtime_put_autosuspend(dev->dev);
6032         pm_runtime_mark_last_busy(dev->dev);
6033
6034         if (dc_state_temp)
6035                 dc_release_state(dc_state_temp);
6036 }
6037
6038
6039 static int dm_force_atomic_commit(struct drm_connector *connector)
6040 {
6041         int ret = 0;
6042         struct drm_device *ddev = connector->dev;
6043         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
6044         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6045         struct drm_plane *plane = disconnected_acrtc->base.primary;
6046         struct drm_connector_state *conn_state;
6047         struct drm_crtc_state *crtc_state;
6048         struct drm_plane_state *plane_state;
6049
6050         if (!state)
6051                 return -ENOMEM;
6052
6053         state->acquire_ctx = ddev->mode_config.acquire_ctx;
6054
6055         /* Construct an atomic state to restore previous display setting */
6056
6057         /*
6058          * Attach connectors to drm_atomic_state
6059          */
6060         conn_state = drm_atomic_get_connector_state(state, connector);
6061
6062         ret = PTR_ERR_OR_ZERO(conn_state);
6063         if (ret)
6064                 goto err;
6065
6066         /* Attach crtc to drm_atomic_state*/
6067         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
6068
6069         ret = PTR_ERR_OR_ZERO(crtc_state);
6070         if (ret)
6071                 goto err;
6072
6073         /* force a restore */
6074         crtc_state->mode_changed = true;
6075
6076         /* Attach plane to drm_atomic_state */
6077         plane_state = drm_atomic_get_plane_state(state, plane);
6078
6079         ret = PTR_ERR_OR_ZERO(plane_state);
6080         if (ret)
6081                 goto err;
6082
6083
6084         /* Call commit internally with the state we just constructed */
6085         ret = drm_atomic_commit(state);
6086         if (!ret)
6087                 return 0;
6088
6089 err:
6090         DRM_ERROR("Restoring old state failed with %i\n", ret);
6091         drm_atomic_state_put(state);
6092
6093         return ret;
6094 }
6095
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
6101 void dm_restore_drm_connector_state(struct drm_device *dev,
6102                                     struct drm_connector *connector)
6103 {
6104         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6105         struct amdgpu_crtc *disconnected_acrtc;
6106         struct dm_crtc_state *acrtc_state;
6107
6108         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
6109                 return;
6110
6111         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6112         if (!disconnected_acrtc)
6113                 return;
6114
6115         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
6116         if (!acrtc_state->stream)
6117                 return;
6118
6119         /*
6120          * If the previous sink is not released and different from the current,
6121          * we deduce we are in a state where we can not rely on usermode call
6122          * to turn on the display, so we do it here
6123          */
6124         if (acrtc_state->stream->sink != aconnector->dc_sink)
6125                 dm_force_atomic_commit(&aconnector->base);
6126 }
6127
6128 /*
6129  * Grabs all modesetting locks to serialize against any blocking commits,
6130  * Waits for completion of all non blocking commits.
6131  */
/* NOTE(review): "aquire" in the name is a typo for "acquire", but renaming
 * would break callers, so it is kept as-is.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /*
         * Adding all modeset locks to state->acquire_ctx ensures the
         * framework will release the extra locks we take here when it
         * drops the acquire context at the end of the commit.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Take a reference on the newest commit under the spinlock
                 * so it cannot be freed while we wait on it below.
                 */
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /*
                 * Make sure all pending HW programming completed and
                 * page flips done
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                        &commit->flip_done, 10*HZ);

                /* ret == 0 means the 10 second timeout expired. */
                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
                                  "timed out\n", crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        /* Negative ret (interrupted) is propagated; timeouts were logged. */
        return ret < 0 ? ret : 0;
}
6178
6179 static void get_freesync_config_for_crtc(
6180         struct dm_crtc_state *new_crtc_state,
6181         struct dm_connector_state *new_con_state)
6182 {
6183         struct mod_freesync_config config = {0};
6184         struct amdgpu_dm_connector *aconnector =
6185                         to_amdgpu_dm_connector(new_con_state->base.connector);
6186         struct drm_display_mode *mode = &new_crtc_state->base.mode;
6187         int vrefresh = drm_mode_vrefresh(mode);
6188
6189         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
6190                                         vrefresh >= aconnector->min_vfreq &&
6191                                         vrefresh <= aconnector->max_vfreq;
6192
6193         if (new_crtc_state->vrr_supported) {
6194                 new_crtc_state->stream->ignore_msa_timing_param = true;
6195                 config.state = new_crtc_state->base.vrr_enabled ?
6196                                 VRR_STATE_ACTIVE_VARIABLE :
6197                                 VRR_STATE_INACTIVE;
6198                 config.min_refresh_in_uhz =
6199                                 aconnector->min_vfreq * 1000000;
6200                 config.max_refresh_in_uhz =
6201                                 aconnector->max_vfreq * 1000000;
6202                 config.vsif_supported = true;
6203                 config.btr = true;
6204         }
6205
6206         new_crtc_state->freesync_config = config;
6207 }
6208
6209 static void reset_freesync_config_for_crtc(
6210         struct dm_crtc_state *new_crtc_state)
6211 {
6212         new_crtc_state->vrr_supported = false;
6213
6214         memset(&new_crtc_state->vrr_params, 0,
6215                sizeof(new_crtc_state->vrr_params));
6216         memset(&new_crtc_state->vrr_infopacket, 0,
6217                sizeof(new_crtc_state->vrr_infopacket));
6218 }
6219
/*
 * dm_update_crtc_state() - Validate one CRTC transition and mirror it
 * into the DC context being built during atomic check.
 *
 * Called twice per CRTC from amdgpu_dm_atomic_check(): once with
 * @enable == false (remove streams for disabled/changed CRTCs) and once
 * with @enable == true (create and add streams for enabled CRTCs).
 *
 * @dm: display manager owning the DC instance
 * @state: the DRM atomic state under check
 * @crtc: the CRTC being validated
 * @old_crtc_state / @new_crtc_state: DRM states for @crtc
 * @enable: false = removal pass, true = addition pass
 * @lock_and_validation_needed: set to true when a stream was added or
 *	removed, signalling the caller that full DC validation (and the
 *	global lock) is required.
 *
 * Return: 0 on success (including the no-modeset fast path), negative
 * errno on failure. On failure any stream created here is released.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *old_crtc_state,
                                struct drm_crtc_state *new_crtc_state,
                                bool enable,
                                bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
         * update changed items
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        /* May be NULL if no connector in @state drives this CRTC. */
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO This hack should go away */
        if (aconnector && enable) {
                /* Make sure fake sink is created in plug-in scenario */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                            &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                            &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                /* Takes a reference; balanced at skip_modeset / fail below. */
                new_stream = create_stream_for_sink(aconnector,
                                                     &new_crtc_state->mode,
                                                    dm_new_conn_state,
                                                    dm_old_crtc_state->stream);

                /*
                 * we can have no stream on ACTION_SET if a display
                 * was disconnected during S3, in this case it is not an
                 * error, the OS will be updated after detection, and
                 * will do the right thing on next atomic commit
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                        __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If DC says the stream (and its scaling) is unchanged,
                 * downgrade the DRM modeset to a fast update.
                 */
                if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* mode_changed flag may get updated above, need to check again */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        DRM_DEBUG_DRIVER(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                /* Nothing to remove if there was no stream. */
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* Drop the old state's reference; the new state has no stream. */
                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else {/* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent NULL pointer on new_stream when
                 * added MST connectors not found in existing crtc_state in the chained mode
                 * TODO: need to dig out the root cause of that
                 */
                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        /*
                         * Extra reference for the CRTC state; the creation
                         * reference is dropped at skip_modeset below.
                         */
                        dc_stream_retain(new_stream);

                        DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /*
         * Release the reference from create_stream_for_sink(); if the
         * stream was attached to the CRTC state, dc_stream_retain()
         * above keeps it alive.
         */
        if (new_stream)
                 dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->enable &&
              new_crtc_state->active))
                return 0;
        /*
         * Given above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling CRTCs (just been added
         *    to the dc context, or already is on the context)
         * 2. Has a valid connector attached, and
         * 3. Is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
                if (ret)
                        goto fail;
                amdgpu_dm_set_ctm(dm_new_crtc_state);
        }

        /* Update Freesync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
6440
6441 static bool should_reset_plane(struct drm_atomic_state *state,
6442                                struct drm_plane *plane,
6443                                struct drm_plane_state *old_plane_state,
6444                                struct drm_plane_state *new_plane_state)
6445 {
6446         struct drm_plane *other;
6447         struct drm_plane_state *old_other_state, *new_other_state;
6448         struct drm_crtc_state *new_crtc_state;
6449         int i;
6450
6451         /*
6452          * TODO: Remove this hack once the checks below are sufficient
6453          * enough to determine when we need to reset all the planes on
6454          * the stream.
6455          */
6456         if (state->allow_modeset)
6457                 return true;
6458
6459         /* Exit early if we know that we're adding or removing the plane. */
6460         if (old_plane_state->crtc != new_plane_state->crtc)
6461                 return true;
6462
6463         /* old crtc == new_crtc == NULL, plane not in context. */
6464         if (!new_plane_state->crtc)
6465                 return false;
6466
6467         new_crtc_state =
6468                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
6469
6470         if (!new_crtc_state)
6471                 return true;
6472
6473         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
6474                 return true;
6475
6476         /*
6477          * If there are any new primary or overlay planes being added or
6478          * removed then the z-order can potentially change. To ensure
6479          * correct z-order and pipe acquisition the current DC architecture
6480          * requires us to remove and recreate all existing planes.
6481          *
6482          * TODO: Come up with a more elegant solution for this.
6483          */
6484         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6485                 if (other->type == DRM_PLANE_TYPE_CURSOR)
6486                         continue;
6487
6488                 if (old_other_state->crtc != new_plane_state->crtc &&
6489                     new_other_state->crtc != new_plane_state->crtc)
6490                         continue;
6491
6492                 if (old_other_state->crtc != new_other_state->crtc)
6493                         return true;
6494
6495                 /* TODO: Remove this once we can handle fast format changes. */
6496                 if (old_other_state->fb && new_other_state->fb &&
6497                     old_other_state->fb->format != new_other_state->fb->format)
6498                         return true;
6499         }
6500
6501         return false;
6502 }
6503
6504 static int dm_update_plane_state(struct dc *dc,
6505                                  struct drm_atomic_state *state,
6506                                  struct drm_plane *plane,
6507                                  struct drm_plane_state *old_plane_state,
6508                                  struct drm_plane_state *new_plane_state,
6509                                  bool enable,
6510                                  bool *lock_and_validation_needed)
6511 {
6512
6513         struct dm_atomic_state *dm_state = NULL;
6514         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6515         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6516         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
6517         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
6518         bool needs_reset;
6519         int ret = 0;
6520
6521
6522         new_plane_crtc = new_plane_state->crtc;
6523         old_plane_crtc = old_plane_state->crtc;
6524         dm_new_plane_state = to_dm_plane_state(new_plane_state);
6525         dm_old_plane_state = to_dm_plane_state(old_plane_state);
6526
6527         /*TODO Implement atomic check for cursor plane */
6528         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6529                 return 0;
6530
6531         needs_reset = should_reset_plane(state, plane, old_plane_state,
6532                                          new_plane_state);
6533
6534         /* Remove any changed/removed planes */
6535         if (!enable) {
6536                 if (!needs_reset)
6537                         return 0;
6538
6539                 if (!old_plane_crtc)
6540                         return 0;
6541
6542                 old_crtc_state = drm_atomic_get_old_crtc_state(
6543                                 state, old_plane_crtc);
6544                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6545
6546                 if (!dm_old_crtc_state->stream)
6547                         return 0;
6548
6549                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
6550                                 plane->base.id, old_plane_crtc->base.id);
6551
6552                 ret = dm_atomic_get_state(state, &dm_state);
6553                 if (ret)
6554                         return ret;
6555
6556                 if (!dc_remove_plane_from_context(
6557                                 dc,
6558                                 dm_old_crtc_state->stream,
6559                                 dm_old_plane_state->dc_state,
6560                                 dm_state->context)) {
6561
6562                         ret = EINVAL;
6563                         return ret;
6564                 }
6565
6566
6567                 dc_plane_state_release(dm_old_plane_state->dc_state);
6568                 dm_new_plane_state->dc_state = NULL;
6569
6570                 *lock_and_validation_needed = true;
6571
6572         } else { /* Add new planes */
6573                 struct dc_plane_state *dc_new_plane_state;
6574
6575                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
6576                         return 0;
6577
6578                 if (!new_plane_crtc)
6579                         return 0;
6580
6581                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
6582                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6583
6584                 if (!dm_new_crtc_state->stream)
6585                         return 0;
6586
6587                 if (!needs_reset)
6588                         return 0;
6589
6590                 WARN_ON(dm_new_plane_state->dc_state);
6591
6592                 dc_new_plane_state = dc_create_plane_state(dc);
6593                 if (!dc_new_plane_state)
6594                         return -ENOMEM;
6595
6596                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
6597                                 plane->base.id, new_plane_crtc->base.id);
6598
6599                 ret = fill_dc_plane_attributes(
6600                         new_plane_crtc->dev->dev_private,
6601                         dc_new_plane_state,
6602                         new_plane_state,
6603                         new_crtc_state);
6604                 if (ret) {
6605                         dc_plane_state_release(dc_new_plane_state);
6606                         return ret;
6607                 }
6608
6609                 ret = dm_atomic_get_state(state, &dm_state);
6610                 if (ret) {
6611                         dc_plane_state_release(dc_new_plane_state);
6612                         return ret;
6613                 }
6614
6615                 /*
6616                  * Any atomic check errors that occur after this will
6617                  * not need a release. The plane state will be attached
6618                  * to the stream, and therefore part of the atomic
6619                  * state. It'll be released when the atomic state is
6620                  * cleaned.
6621                  */
6622                 if (!dc_add_plane_to_context(
6623                                 dc,
6624                                 dm_new_crtc_state->stream,
6625                                 dc_new_plane_state,
6626                                 dm_state->context)) {
6627
6628                         dc_plane_state_release(dc_new_plane_state);
6629                         return -EINVAL;
6630                 }
6631
6632                 dm_new_plane_state->dc_state = dc_new_plane_state;
6633
6634                 /* Tell DC to do a full surface update every time there
6635                  * is a plane change. Inefficient, but works for now.
6636                  */
6637                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
6638
6639                 *lock_and_validation_needed = true;
6640         }
6641
6642
6643         return ret;
6644 }
6645
6646 static int
6647 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
6648                                     struct drm_atomic_state *state,
6649                                     enum surface_update_type *out_type)
6650 {
6651         struct dc *dc = dm->dc;
6652         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
6653         int i, j, num_plane, ret = 0;
6654         struct drm_plane_state *old_plane_state, *new_plane_state;
6655         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
6656         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6657         struct drm_plane *plane;
6658
6659         struct drm_crtc *crtc;
6660         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
6661         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
6662         struct dc_stream_status *status = NULL;
6663
6664         struct dc_surface_update *updates;
6665         enum surface_update_type update_type = UPDATE_TYPE_FAST;
6666
6667         updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
6668
6669         if (!updates) {
6670                 DRM_ERROR("Failed to allocate plane updates\n");
6671                 /* Set type to FULL to avoid crashing in DC*/
6672                 update_type = UPDATE_TYPE_FULL;
6673                 goto cleanup;
6674         }
6675
6676         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6677                 struct dc_scaling_info scaling_info;
6678                 struct dc_stream_update stream_update;
6679
6680                 memset(&stream_update, 0, sizeof(stream_update));
6681
6682                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6683                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
6684                 num_plane = 0;
6685
6686                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
6687                         update_type = UPDATE_TYPE_FULL;
6688                         goto cleanup;
6689                 }
6690
6691                 if (!new_dm_crtc_state->stream)
6692                         continue;
6693
6694                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
6695                         new_plane_crtc = new_plane_state->crtc;
6696                         old_plane_crtc = old_plane_state->crtc;
6697                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
6698                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
6699
6700                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6701                                 continue;
6702
6703                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
6704                                 update_type = UPDATE_TYPE_FULL;
6705                                 goto cleanup;
6706                         }
6707
6708                         if (crtc != new_plane_crtc)
6709                                 continue;
6710
6711                         updates[num_plane].surface = new_dm_plane_state->dc_state;
6712
6713                         if (new_crtc_state->mode_changed) {
6714                                 stream_update.dst = new_dm_crtc_state->stream->dst;
6715                                 stream_update.src = new_dm_crtc_state->stream->src;
6716                         }
6717
6718                         if (new_crtc_state->color_mgmt_changed) {
6719                                 updates[num_plane].gamma =
6720                                                 new_dm_plane_state->dc_state->gamma_correction;
6721                                 updates[num_plane].in_transfer_func =
6722                                                 new_dm_plane_state->dc_state->in_transfer_func;
6723                                 stream_update.gamut_remap =
6724                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
6725                                 stream_update.out_transfer_func =
6726                                                 new_dm_crtc_state->stream->out_transfer_func;
6727                         }
6728
6729                         ret = fill_dc_scaling_info(new_plane_state,
6730                                                    &scaling_info);
6731                         if (ret)
6732                                 goto cleanup;
6733
6734                         updates[num_plane].scaling_info = &scaling_info;
6735
6736                         num_plane++;
6737                 }
6738
6739                 if (num_plane == 0)
6740                         continue;
6741
6742                 ret = dm_atomic_get_state(state, &dm_state);
6743                 if (ret)
6744                         goto cleanup;
6745
6746                 old_dm_state = dm_atomic_get_old_state(state);
6747                 if (!old_dm_state) {
6748                         ret = -EINVAL;
6749                         goto cleanup;
6750                 }
6751
6752                 status = dc_stream_get_status_from_state(old_dm_state->context,
6753                                                          new_dm_crtc_state->stream);
6754
6755                 /*
6756                  * TODO: DC modifies the surface during this call so we need
6757                  * to lock here - find a way to do this without locking.
6758                  */
6759                 mutex_lock(&dm->dc_lock);
6760                 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
6761                                                                   &stream_update, status);
6762                 mutex_unlock(&dm->dc_lock);
6763
6764                 if (update_type > UPDATE_TYPE_MED) {
6765                         update_type = UPDATE_TYPE_FULL;
6766                         goto cleanup;
6767                 }
6768         }
6769
6770 cleanup:
6771         kfree(updates);
6772
6773         *out_type = update_type;
6774         return ret;
6775 }
6776
6777 /**
6778  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
6779  * @dev: The DRM device
6780  * @state: The atomic state to commit
6781  *
6782  * Validate that the given atomic state is programmable by DC into hardware.
6783  * This involves constructing a &struct dc_state reflecting the new hardware
6784  * state we wish to commit, then querying DC to see if it is programmable. It's
6785  * important not to modify the existing DC state. Otherwise, atomic_check
6786  * may unexpectedly commit hardware changes.
6787  *
6788  * When validating the DC state, it's important that the right locks are
6789  * acquired. For full updates case which removes/adds/updates streams on one
6790  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
6791  * that any such full update commit will wait for completion of any outstanding
6792  * flip using DRMs synchronization events. See
6793  * dm_determine_update_type_for_commit()
6794  *
6795  * Note that DM adds the affected connectors for all CRTCs in state, when that
6796  * might not seem necessary. This is because DC stream creation requires the
6797  * DC sink, which is tied to the DRM connector state. Cleaning this up should
6798  * be possible but non-trivial - a possible TODO item.
6799  *
6800  * Return: -Error code if validation failed.
6801  */
6802 static int amdgpu_dm_atomic_check(struct drm_device *dev,
6803                                   struct drm_atomic_state *state)
6804 {
6805         struct amdgpu_device *adev = dev->dev_private;
6806         struct dm_atomic_state *dm_state = NULL;
6807         struct dc *dc = adev->dm.dc;
6808         struct drm_connector *connector;
6809         struct drm_connector_state *old_con_state, *new_con_state;
6810         struct drm_crtc *crtc;
6811         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6812         struct drm_plane *plane;
6813         struct drm_plane_state *old_plane_state, *new_plane_state;
6814         enum surface_update_type update_type = UPDATE_TYPE_FAST;
6815         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
6816
6817         int ret, i;
6818
6819         /*
6820          * This bool will be set for true for any modeset/reset
6821          * or plane update which implies non fast surface update.
6822          */
6823         bool lock_and_validation_needed = false;
6824
6825         ret = drm_atomic_helper_check_modeset(dev, state);
6826         if (ret)
6827                 goto fail;
6828
6829         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6830                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
6831                     !new_crtc_state->color_mgmt_changed &&
6832                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
6833                         continue;
6834
6835                 if (!new_crtc_state->enable)
6836                         continue;
6837
6838                 ret = drm_atomic_add_affected_connectors(state, crtc);
6839                 if (ret)
6840                         return ret;
6841
6842                 ret = drm_atomic_add_affected_planes(state, crtc);
6843                 if (ret)
6844                         goto fail;
6845         }
6846
6847         /*
6848          * Add all primary and overlay planes on the CRTC to the state
6849          * whenever a plane is enabled to maintain correct z-ordering
6850          * and to enable fast surface updates.
6851          */
6852         drm_for_each_crtc(crtc, dev) {
6853                 bool modified = false;
6854
6855                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6856                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6857                                 continue;
6858
6859                         if (new_plane_state->crtc == crtc ||
6860                             old_plane_state->crtc == crtc) {
6861                                 modified = true;
6862                                 break;
6863                         }
6864                 }
6865
6866                 if (!modified)
6867                         continue;
6868
6869                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
6870                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6871                                 continue;
6872
6873                         new_plane_state =
6874                                 drm_atomic_get_plane_state(state, plane);
6875
6876                         if (IS_ERR(new_plane_state)) {
6877                                 ret = PTR_ERR(new_plane_state);
6878                                 goto fail;
6879                         }
6880                 }
6881         }
6882
6883         /* Remove exiting planes if they are modified */
6884         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6885                 ret = dm_update_plane_state(dc, state, plane,
6886                                             old_plane_state,
6887                                             new_plane_state,
6888                                             false,
6889                                             &lock_and_validation_needed);
6890                 if (ret)
6891                         goto fail;
6892         }
6893
6894         /* Disable all crtcs which require disable */
6895         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6896                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6897                                            old_crtc_state,
6898                                            new_crtc_state,
6899                                            false,
6900                                            &lock_and_validation_needed);
6901                 if (ret)
6902                         goto fail;
6903         }
6904
6905         /* Enable all crtcs which require enable */
6906         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6907                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6908                                            old_crtc_state,
6909                                            new_crtc_state,
6910                                            true,
6911                                            &lock_and_validation_needed);
6912                 if (ret)
6913                         goto fail;
6914         }
6915
6916         /* Add new/modified planes */
6917         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6918                 ret = dm_update_plane_state(dc, state, plane,
6919                                             old_plane_state,
6920                                             new_plane_state,
6921                                             true,
6922                                             &lock_and_validation_needed);
6923                 if (ret)
6924                         goto fail;
6925         }
6926
6927         /* Run this here since we want to validate the streams we created */
6928         ret = drm_atomic_helper_check_planes(dev, state);
6929         if (ret)
6930                 goto fail;
6931
6932         /* Check scaling and underscan changes*/
6933         /* TODO Removed scaling changes validation due to inability to commit
6934          * new stream into context w\o causing full reset. Need to
6935          * decide how to handle.
6936          */
6937         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6938                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6939                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6940                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6941
6942                 /* Skip any modesets/resets */
6943                 if (!acrtc || drm_atomic_crtc_needs_modeset(
6944                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
6945                         continue;
6946
6947                 /* Skip any thing not scale or underscan changes */
6948                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
6949                         continue;
6950
6951                 overall_update_type = UPDATE_TYPE_FULL;
6952                 lock_and_validation_needed = true;
6953         }
6954
6955         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
6956         if (ret)
6957                 goto fail;
6958
6959         if (overall_update_type < update_type)
6960                 overall_update_type = update_type;
6961
6962         /*
6963          * lock_and_validation_needed was an old way to determine if we need to set
6964          * the global lock. Leaving it in to check if we broke any corner cases
6965          * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6966          * lock_and_validation_needed false = UPDATE_TYPE_FAST
6967          */
6968         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6969                 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6970
6971         if (overall_update_type > UPDATE_TYPE_FAST) {
6972                 ret = dm_atomic_get_state(state, &dm_state);
6973                 if (ret)
6974                         goto fail;
6975
6976                 ret = do_aquire_global_lock(dev, state);
6977                 if (ret)
6978                         goto fail;
6979
6980                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
6981                         ret = -EINVAL;
6982                         goto fail;
6983                 }
6984         } else if (state->legacy_cursor_update) {
6985                 /*
6986                  * This is a fast cursor update coming from the plane update
6987                  * helper, check if it can be done asynchronously for better
6988                  * performance.
6989                  */
6990                 state->async_update = !drm_atomic_helper_async_check(dev, state);
6991         }
6992
6993         /* Must be success */
6994         WARN_ON(ret);
6995         return ret;
6996
6997 fail:
6998         if (ret == -EDEADLK)
6999                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
7000         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
7001                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
7002         else
7003                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
7004
7005         return ret;
7006 }
7007
7008 static bool is_dp_capable_without_timing_msa(struct dc *dc,
7009                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
7010 {
7011         uint8_t dpcd_data;
7012         bool capable = false;
7013
7014         if (amdgpu_dm_connector->dc_link &&
7015                 dm_helpers_dp_read_dpcd(
7016                                 NULL,
7017                                 amdgpu_dm_connector->dc_link,
7018                                 DP_DOWN_STREAM_PORT_COUNT,
7019                                 &dpcd_data,
7020                                 sizeof(dpcd_data))) {
7021                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
7022         }
7023
7024         return capable;
7025 }
7026 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
7027                                         struct edid *edid)
7028 {
7029         int i;
7030         bool edid_check_required;
7031         struct detailed_timing *timing;
7032         struct detailed_non_pixel *data;
7033         struct detailed_data_monitor_range *range;
7034         struct amdgpu_dm_connector *amdgpu_dm_connector =
7035                         to_amdgpu_dm_connector(connector);
7036         struct dm_connector_state *dm_con_state = NULL;
7037
7038         struct drm_device *dev = connector->dev;
7039         struct amdgpu_device *adev = dev->dev_private;
7040         bool freesync_capable = false;
7041
7042         if (!connector->state) {
7043                 DRM_ERROR("%s - Connector has no state", __func__);
7044                 goto update;
7045         }
7046
7047         if (!edid) {
7048                 dm_con_state = to_dm_connector_state(connector->state);
7049
7050                 amdgpu_dm_connector->min_vfreq = 0;
7051                 amdgpu_dm_connector->max_vfreq = 0;
7052                 amdgpu_dm_connector->pixel_clock_mhz = 0;
7053
7054                 goto update;
7055         }
7056
7057         dm_con_state = to_dm_connector_state(connector->state);
7058
7059         edid_check_required = false;
7060         if (!amdgpu_dm_connector->dc_sink) {
7061                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
7062                 goto update;
7063         }
7064         if (!adev->dm.freesync_module)
7065                 goto update;
7066         /*
7067          * if edid non zero restrict freesync only for dp and edp
7068          */
7069         if (edid) {
7070                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
7071                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
7072                         edid_check_required = is_dp_capable_without_timing_msa(
7073                                                 adev->dm.dc,
7074                                                 amdgpu_dm_connector);
7075                 }
7076         }
7077         if (edid_check_required == true && (edid->version > 1 ||
7078            (edid->version == 1 && edid->revision > 1))) {
7079                 for (i = 0; i < 4; i++) {
7080
7081                         timing  = &edid->detailed_timings[i];
7082                         data    = &timing->data.other_data;
7083                         range   = &data->data.range;
7084                         /*
7085                          * Check if monitor has continuous frequency mode
7086                          */
7087                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
7088                                 continue;
7089                         /*
7090                          * Check for flag range limits only. If flag == 1 then
7091                          * no additional timing information provided.
7092                          * Default GTF, GTF Secondary curve and CVT are not
7093                          * supported
7094                          */
7095                         if (range->flags != 1)
7096                                 continue;
7097
7098                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
7099                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
7100                         amdgpu_dm_connector->pixel_clock_mhz =
7101                                 range->pixel_clock_mhz * 10;
7102                         break;
7103                 }
7104
7105                 if (amdgpu_dm_connector->max_vfreq -
7106                     amdgpu_dm_connector->min_vfreq > 10) {
7107
7108                         freesync_capable = true;
7109                 }
7110         }
7111
7112 update:
7113         if (dm_con_state)
7114                 dm_con_state->freesync_capable = freesync_capable;
7115
7116         if (connector->vrr_capable_property)
7117                 drm_connector_set_vrr_capable_property(connector,
7118                                                        freesync_capable);
7119 }
7120
This page took 0.475457 seconds and 4 git commands to generate.