]> Git Repo - linux.git/blob - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Merge branch 'parisc-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller...
[linux.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "dcn/dcn_1_0_offset.h"
63 #include "dcn/dcn_1_0_sh_mask.h"
64 #include "soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
69 #include "modules/inc/mod_freesync.h"
70
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
78  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79  * drm_encoder, drm_mode_config
80  *
81  * Returns 0 on success
82  */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91                                 struct amdgpu_plane *aplane,
92                                 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94                                struct drm_plane *plane,
95                                uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
98                                     uint32_t link_index,
99                                     struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101                                   struct amdgpu_encoder *aencoder,
102                                   uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107                                    struct drm_atomic_state *state,
108                                    bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113                                   struct drm_atomic_state *state);
114
115
116
117
/* Default per-pipe plane layout: every pipe exposes only a primary plane. */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
126
/* Carrizo: three primary planes plus a YUV-capable underlay overlay.
 * (The "carizzo" spelling in the identifier is kept as-is; it is
 * referenced by this name elsewhere.) */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
133
/* Stoney: two primary planes plus a YUV-capable underlay overlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
139
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155         if (crtc >= adev->mode_info.num_crtc)
156                 return 0;
157         else {
158                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160                                 acrtc->base.state);
161
162
163                 if (acrtc_state->stream == NULL) {
164                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165                                   crtc);
166                         return 0;
167                 }
168
169                 return dc_stream_get_vblank_counter(acrtc_state->stream);
170         }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174                                   u32 *vbl, u32 *position)
175 {
176         uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179                 return -EINVAL;
180         else {
181                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183                                                 acrtc->base.state);
184
185                 if (acrtc_state->stream ==  NULL) {
186                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187                                   crtc);
188                         return 0;
189                 }
190
191                 /*
192                  * TODO rework base driver to use values directly.
193                  * for now parse it back into reg-format
194                  */
195                 dc_stream_get_scanoutpos(acrtc_state->stream,
196                                          &v_blank_start,
197                                          &v_blank_end,
198                                          &h_position,
199                                          &v_position);
200
201                 *position = v_position | (h_position << 16);
202                 *vbl = v_blank_start | (v_blank_end << 16);
203         }
204
205         return 0;
206 }
207
/* IP hook stub: DM reports itself as always idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
213
/* IP hook stub: nothing to wait for, always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
219
/* IP hook stub: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
224
/* IP hook stub: soft reset is not implemented, report success. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
230
/*
 * Map a hardware OTG (output timing generator) instance back to its
 * amdgpu_crtc. Used by the IRQ handlers to find the CRTC an interrupt
 * belongs to.
 *
 * Returns the matching CRTC, crtc[0] (with a WARN) for the legacy
 * otg_inst == -1 case, or NULL when no CRTC matches.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * following if is check inherited from both functions where this one is
	 * used now. Need to be checked why it could happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
257
/*
 * Page-flip completion interrupt handler.
 *
 * Looks up the CRTC from the IRQ source, and - under the DRM event lock -
 * sends the pending vblank event to userspace and marks the flip done.
 * Bails out (debug log only) when no flip was actually submitted, which
 * can happen if the IRQ races with flip setup/teardown.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/*TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}


	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* drop the reference taken when the flip was submitted */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
308
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311         struct common_irq_params *irq_params = interrupt_params;
312         struct amdgpu_device *adev = irq_params->adev;
313         uint8_t crtc_index = 0;
314         struct amdgpu_crtc *acrtc;
315
316         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318         if (acrtc)
319                 crtc_index = acrtc->crtc_id;
320
321         drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
324 static int dm_set_clockgating_state(void *handle,
325                   enum amd_clockgating_state state)
326 {
327         return 0;
328 }
329
330 static int dm_set_powergating_state(void *handle,
331                   enum amd_powergating_state state)
332 {
333         return 0;
334 }
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
/*
 * Deferred MST hotplug work: notify DRM userspace (hotplug uevent) that
 * the connector topology may have changed. Runs from the workqueue, so
 * it is safe to take the locks drm_kms_helper_hotplug_event() needs.
 */
static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}
346
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data  */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
352
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355         int r;
356         struct dm_comressor_info *compressor = &adev->dm.compressor;
357
358         if (!compressor->bo_ptr) {
359                 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360                                 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361                                 &compressor->gpu_addr, &compressor->cpu_addr);
362
363                 if (r)
364                         DRM_ERROR("DM: Failed to initialize fbc\n");
365         }
366
367 }
368 #endif
369
370
/* Init display KMS
 *
 * Creates the DC core, the freesync module and the DRM-side display
 * structures, then sets up vblank support. Called from dm_hw_init().
 * Any failure jumps to the common error path, which tears down whatever
 * was already created via amdgpu_dm_fini().
 *
 * Returns 0 on success, -1 on failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Log verbosity is controlled by the amdgpu_dc_log module option. */
	if (amdgpu_dc_log)
		init_data.log_mask = DC_DEFAULT_LOG_MASK;
	else
		init_data.log_mask = DC_MIN_LOG_MASK;

#if defined(CONFIG_DRM_AMD_DC_FBC)
	/* FBC buffer is currently only allocated for Carrizo. */
	if (adev->family == FAMILY_CZ)
		amdgpu_dm_initialize_fbc(adev);
	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
#endif
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	/* Freesync failure is non-fatal: it is only logged. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* amdgpu_dm_fini() tolerates partially-initialized state. */
	amdgpu_dm_fini(adev);

	return -1;
}
479
480 static void amdgpu_dm_fini(struct amdgpu_device *adev)
481 {
482         amdgpu_dm_destroy_drm_device(&adev->dm);
483         /*
484          * TODO: pageflip, vlank interrupt
485          *
486          * amdgpu_dm_irq_fini(adev);
487          */
488
489         if (adev->dm.cgs_device) {
490                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
491                 adev->dm.cgs_device = NULL;
492         }
493         if (adev->dm.freesync_module) {
494                 mod_freesync_destroy(adev->dm.freesync_module);
495                 adev->dm.freesync_module = NULL;
496         }
497         /* DC Destroy TODO: Replace destroy DAL */
498         if (adev->dm.dc)
499                 dc_destroy(&adev->dm.dc);
500         return;
501 }
502
/* IP sw_init hook stub: no software-side setup needed here. */
static int dm_sw_init(void *handle)
{
	return 0;
}
507
/* IP sw_fini hook stub: nothing to tear down here. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
512
513 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
514 {
515         struct amdgpu_dm_connector *aconnector;
516         struct drm_connector *connector;
517         int ret = 0;
518
519         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
520
521         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
522                 aconnector = to_amdgpu_dm_connector(connector);
523                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
524                     aconnector->mst_mgr.aux) {
525                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
526                                         aconnector, aconnector->base.base.id);
527
528                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
529                         if (ret < 0) {
530                                 DRM_ERROR("DM_MST: Failed to start MST\n");
531                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
532                                 return ret;
533                                 }
534                         }
535         }
536
537         drm_modeset_unlock(&dev->mode_config.connection_mutex);
538         return ret;
539 }
540
541 static int dm_late_init(void *handle)
542 {
543         struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
544
545         return detect_mst_link_for_all_connectors(dev);
546 }
547
548 static void s3_handle_mst(struct drm_device *dev, bool suspend)
549 {
550         struct amdgpu_dm_connector *aconnector;
551         struct drm_connector *connector;
552
553         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
554
555         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
556                    aconnector = to_amdgpu_dm_connector(connector);
557                    if (aconnector->dc_link->type == dc_connection_mst_branch &&
558                                    !aconnector->mst_port) {
559
560                            if (suspend)
561                                    drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
562                            else
563                                    drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
564                    }
565         }
566
567         drm_modeset_unlock(&dev->mode_config.connection_mutex);
568 }
569
/* IP hw_init hook: create the display manager and enable HPD handling. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	/* NOTE(review): amdgpu_dm_init()'s return value is ignored here; a
	 * failed DM init is only visible through its own error logging. */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
579
/* IP hw_fini hook: disable HPD and interrupts, then destroy the DM. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
590
/*
 * IP suspend hook (S3 entry). Order matters:
 *  1. quiesce MST topology managers,
 *  2. disable DM interrupts,
 *  3. cache the current atomic state for replay in dm_resume(),
 *  4. put the display hardware into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* cached_state must have been consumed by the previous resume */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
608
609 static struct amdgpu_dm_connector *
610 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
611                                              struct drm_crtc *crtc)
612 {
613         uint32_t i;
614         struct drm_connector_state *new_con_state;
615         struct drm_connector *connector;
616         struct drm_crtc *crtc_from_state;
617
618         for_each_new_connector_in_state(state, connector, new_con_state, i) {
619                 crtc_from_state = new_con_state->crtc;
620
621                 if (crtc_from_state == crtc)
622                         return to_amdgpu_dm_connector(connector);
623         }
624
625         return NULL;
626 }
627
628 static int dm_resume(void *handle)
629 {
630         struct amdgpu_device *adev = handle;
631         struct amdgpu_display_manager *dm = &adev->dm;
632         int ret = 0;
633
634         /* power on hardware */
635         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
636
637         ret = amdgpu_dm_display_resume(adev);
638         return ret;
639 }
640
/*
 * Second stage of resume: re-detect every link, then replay the atomic
 * state cached by dm_suspend(). The cached dc stream/plane states are
 * dropped first so that atomic_check re-creates them from scratch.
 *
 * Returns the result of drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		/* a real sink showed up, stop faking the connection */
		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	/* the helper above consumed (and freed) the cached state */
	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
727
/* IP-block callbacks that wire the display manager into amdgpu's IP
 * state machine (init/fini, suspend/resume, idle and gating hooks). */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
745
/* IP block descriptor registered with the amdgpu device (DCE, v1.0.0). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
754
755
756 static struct drm_atomic_state *
757 dm_atomic_state_alloc(struct drm_device *dev)
758 {
759         struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
760
761         if (!state)
762                 return NULL;
763
764         if (drm_atomic_state_init(dev, &state->base) < 0)
765                 goto fail;
766
767         return &state->base;
768
769 fail:
770         kfree(state);
771         return NULL;
772 }
773
774 static void
775 dm_atomic_state_clear(struct drm_atomic_state *state)
776 {
777         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
778
779         if (dm_state->context) {
780                 dc_release_state(dm_state->context);
781                 dm_state->context = NULL;
782         }
783
784         drm_atomic_state_default_clear(state);
785 }
786
/* ->atomic_state_free: release core resources, then free the subclass. */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dstate = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dstate);
}
794
/* Mode-config entry points: atomic check/commit plus DM's subclassed
 * atomic-state allocation so dc context travels with each state. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};
804
/* Helper hook: DM provides its own commit_tail to program DC hardware. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
808
/*
 * Synchronize the DRM connector (sink pointer, EDID property, freesync
 * registration) with the result of the most recent dc_link detection.
 * Called after HPD events and during resume. MST connectors are handled
 * by the drm_mst framework and skipped here.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Don't do it here if you are during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc-to-connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0) {
			/* sink carries no EDID: drop any stale EDID pointer */
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		/* disconnected: clear EDID property, modes and sink pointer */
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
913
/*
 * Low-IRQ-context handler for a HPD (hot-plug detect) long pulse on a
 * single connector.  @param is the struct amdgpu_dm_connector that was
 * registered for this IRQ source in register_hpd_handlers().
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in it's own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	/* A real HPD event supersedes any fake enable left on the connector */
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		/* Re-apply the saved DRM state now that the sink changed */
		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only notify userspace when the connector state is not forced */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
942
/*
 * Service the DP downstream-IRQ / ESI registers after a short pulse on an
 * MST-capable link.  Each iteration reads the sink-count/ESI bytes, lets the
 * DRM MST manager process them, ACKs the handled event vectors back to the
 * sink and re-reads, until no new IRQ is reported or the iteration budget is
 * exhausted.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bound the ACK/re-read loop so a misbehaving sink cannot stall us */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		/* DPCD 1.2+ sinks report events through the ESI block */
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* AUX writes can fail transiently; retry a few times */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1020
/*
 * Low-IRQ-context handler for a HPD short pulse (hpd_rx) on a single
 * connector; @param is the struct amdgpu_dm_connector registered in
 * register_hpd_handlers().  Handles DP link-status events and, for
 * non-MST links whose downstream port status changed, re-runs detection.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* Drain ESI/downstream IRQ registers for trained links and MST roots */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
1057
1058 static void register_hpd_handlers(struct amdgpu_device *adev)
1059 {
1060         struct drm_device *dev = adev->ddev;
1061         struct drm_connector *connector;
1062         struct amdgpu_dm_connector *aconnector;
1063         const struct dc_link *dc_link;
1064         struct dc_interrupt_params int_params = {0};
1065
1066         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1067         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1068
1069         list_for_each_entry(connector,
1070                         &dev->mode_config.connector_list, head) {
1071
1072                 aconnector = to_amdgpu_dm_connector(connector);
1073                 dc_link = aconnector->dc_link;
1074
1075                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1076                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1077                         int_params.irq_source = dc_link->irq_source_hpd;
1078
1079                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1080                                         handle_hpd_irq,
1081                                         (void *) aconnector);
1082                 }
1083
1084                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1085
1086                         /* Also register for DP short pulse (hpd_rx). */
1087                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1088                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1089
1090                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1091                                         handle_hpd_rx_irq,
1092                                         (void *) aconnector);
1093                 }
1094         }
1095 }
1096
/* Register IRQ sources and initialize IRQ callbacks (DCE 8/10/11/12 path).
 * Returns 0 on success or the error from amdgpu_irq_add_id(). */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	/* Pre-Vega ASICs route display interrupts via the legacy IH client */
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* per-source context handed back to dm_crtc_high_irq() */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* per-source context handed back to dm_pflip_high_irq() */
		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1180
1181 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks (DCN 1.0 / Raven path).
 * Returns 0 on success or the error from amdgpu_irq_add_id(). */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt (DCN's vblank analogue), one per CRTC */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* per-source context handed back to dm_crtc_high_irq() */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt, one per HUBP */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* per-source context handed back to dm_pflip_high_irq() */
		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1265 #endif
1266
1267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1268 {
1269         int r;
1270
1271         adev->mode_info.mode_config_initialized = true;
1272
1273         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1274         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1275
1276         adev->ddev->mode_config.max_width = 16384;
1277         adev->ddev->mode_config.max_height = 16384;
1278
1279         adev->ddev->mode_config.preferred_depth = 24;
1280         adev->ddev->mode_config.prefer_shadow = 1;
1281         /* indicate support of immediate flip */
1282         adev->ddev->mode_config.async_page_flip = true;
1283
1284         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1285
1286         r = amdgpu_modeset_create_props(adev);
1287         if (r)
1288                 return r;
1289
1290         return 0;
1291 }
1292
1293 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1294         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1295
1296 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1297 {
1298         struct amdgpu_display_manager *dm = bl_get_data(bd);
1299
1300         if (dc_link_set_backlight_level(dm->backlight_link,
1301                         bd->props.brightness, 0, 0))
1302                 return 0;
1303         else
1304                 return 1;
1305 }
1306
/* backlight_ops.get_brightness hook: report the last value userspace set
 * (the hardware level is not read back). */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}
1311
/* Operations handed to backlight_device_register() below */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
1316
1317 static void
1318 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1319 {
1320         char bl_name[16];
1321         struct backlight_properties props = { 0 };
1322
1323         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1324         props.type = BACKLIGHT_RAW;
1325
1326         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1327                         dm->adev->ddev->primary->index);
1328
1329         dm->backlight_dev = backlight_device_register(bl_name,
1330                         dm->adev->ddev->dev,
1331                         dm,
1332                         &amdgpu_dm_backlight_ops,
1333                         &props);
1334
1335         if (IS_ERR(dm->backlight_dev))
1336                 DRM_ERROR("DM: Backlight registration failed!\n");
1337         else
1338                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1339 }
1340
1341 #endif
1342
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	/* One DRM plane per DC plane; the first max_streams of them double as
	 * the primary planes bound to CRTCs below. */
	for (i = 0; i < dm->dc->caps.max_planes; i++) {
		struct amdgpu_plane *plane;

		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
		mode_info->planes[i] = plane;

		if (!plane) {
			DRM_ERROR("KMS: Failed to allocate plane\n");
			goto fail;
		}
		plane->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have one
		 * one possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		/* Probe what is plugged in at boot so the first atomic state
		 * starts from real connector status. */
		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
				DETECT_REASON_BOOT))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		/*
		 * Temporary disable until pplib/smu interaction is implemented
		 */
		dm->dc->debug.disable_stutter = true;
		break;
#endif
	default:
		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/* NOTE(review): this frees every planes[i] even when the plane was
	 * already handed to amdgpu_dm_plane_init(); presumably DRM-registered
	 * objects are also released by drm_mode_config_cleanup() on the
	 * teardown path — confirm there is no double free here. */
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -1;
}
1480
1481 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1482 {
1483         drm_mode_config_cleanup(dm->ddev);
1484         return;
1485 }
1486
1487 /******************************************************************************
1488  * amdgpu_display_funcs functions
1489  *****************************************************************************/
1490
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
	/* Intentionally empty: with DC, bandwidth/watermark programming is
	 * expected to happen inside the DC core rather than in this hook. */
}
1502
/* Stub for the base driver's backlight_set_level hook; backlight control
 * currently goes through the backlight class device instead (see
 * amdgpu_dm_backlight_update_status()). */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1508
/* Stub for the base driver's backlight_get_level hook; always reports 0
 * until the encoder-to-display_index translation is implemented. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1514
1515 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1516                                 struct drm_file *filp)
1517 {
1518         struct mod_freesync_params freesync_params;
1519         uint8_t num_streams;
1520         uint8_t i;
1521
1522         struct amdgpu_device *adev = dev->dev_private;
1523         int r = 0;
1524
1525         /* Get freesync enable flag from DRM */
1526
1527         num_streams = dc_get_current_stream_count(adev->dm.dc);
1528
1529         for (i = 0; i < num_streams; i++) {
1530                 struct dc_stream_state *stream;
1531                 stream = dc_get_stream_at_index(adev->dm.dc, i);
1532
1533                 mod_freesync_update_state(adev->dm.freesync_module,
1534                                           &stream, 1, &freesync_params);
1535         }
1536
1537         return r;
1538 }
1539
/* Display callbacks handed to the amdgpu base driver.  Entries set to NULL
 * are either unused with DC or handled internally by DAL (see the per-entry
 * comments below). */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1558
1559 #if defined(CONFIG_DEBUG_KERNEL_DC)
1560
/*
 * Debug-only sysfs store hook: writing a non-zero integer simulates an S3
 * resume (dm_resume + display resume + hotplug event); writing zero
 * simulates a suspend.  Returns the consumed byte count on success, 0 when
 * the input does not parse as an integer.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

/* Declares the write-only dev_attr_s3_debug used in dm_early_init() */
DEVICE_ATTR_WO(s3_debug);
1587
1588 #endif
1589
1590 static int dm_early_init(void *handle)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593
1594         adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1595
1596         switch (adev->asic_type) {
1597         case CHIP_BONAIRE:
1598         case CHIP_HAWAII:
1599                 adev->mode_info.num_crtc = 6;
1600                 adev->mode_info.num_hpd = 6;
1601                 adev->mode_info.num_dig = 6;
1602                 adev->mode_info.plane_type = dm_plane_type_default;
1603                 break;
1604         case CHIP_KAVERI:
1605                 adev->mode_info.num_crtc = 4;
1606                 adev->mode_info.num_hpd = 6;
1607                 adev->mode_info.num_dig = 7;
1608                 adev->mode_info.plane_type = dm_plane_type_default;
1609                 break;
1610         case CHIP_KABINI:
1611         case CHIP_MULLINS:
1612                 adev->mode_info.num_crtc = 2;
1613                 adev->mode_info.num_hpd = 6;
1614                 adev->mode_info.num_dig = 6;
1615                 adev->mode_info.plane_type = dm_plane_type_default;
1616                 break;
1617         case CHIP_FIJI:
1618         case CHIP_TONGA:
1619                 adev->mode_info.num_crtc = 6;
1620                 adev->mode_info.num_hpd = 6;
1621                 adev->mode_info.num_dig = 7;
1622                 adev->mode_info.plane_type = dm_plane_type_default;
1623                 break;
1624         case CHIP_CARRIZO:
1625                 adev->mode_info.num_crtc = 3;
1626                 adev->mode_info.num_hpd = 6;
1627                 adev->mode_info.num_dig = 9;
1628                 adev->mode_info.plane_type = dm_plane_type_carizzo;
1629                 break;
1630         case CHIP_STONEY:
1631                 adev->mode_info.num_crtc = 2;
1632                 adev->mode_info.num_hpd = 6;
1633                 adev->mode_info.num_dig = 9;
1634                 adev->mode_info.plane_type = dm_plane_type_stoney;
1635                 break;
1636         case CHIP_POLARIS11:
1637         case CHIP_POLARIS12:
1638                 adev->mode_info.num_crtc = 5;
1639                 adev->mode_info.num_hpd = 5;
1640                 adev->mode_info.num_dig = 5;
1641                 adev->mode_info.plane_type = dm_plane_type_default;
1642                 break;
1643         case CHIP_POLARIS10:
1644                 adev->mode_info.num_crtc = 6;
1645                 adev->mode_info.num_hpd = 6;
1646                 adev->mode_info.num_dig = 6;
1647                 adev->mode_info.plane_type = dm_plane_type_default;
1648                 break;
1649         case CHIP_VEGA10:
1650                 adev->mode_info.num_crtc = 6;
1651                 adev->mode_info.num_hpd = 6;
1652                 adev->mode_info.num_dig = 6;
1653                 adev->mode_info.plane_type = dm_plane_type_default;
1654                 break;
1655 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1656         case CHIP_RAVEN:
1657                 adev->mode_info.num_crtc = 4;
1658                 adev->mode_info.num_hpd = 4;
1659                 adev->mode_info.num_dig = 4;
1660                 adev->mode_info.plane_type = dm_plane_type_default;
1661                 break;
1662 #endif
1663         default:
1664                 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1665                 return -EINVAL;
1666         }
1667
1668         amdgpu_dm_set_irq_funcs(adev);
1669
1670         if (adev->mode_info.funcs == NULL)
1671                 adev->mode_info.funcs = &dm_display_funcs;
1672
1673         /* Note: Do NOT change adev->audio_endpt_rreg and
1674          * adev->audio_endpt_wreg because they are initialised in
1675          * amdgpu_device_init() */
1676 #if defined(CONFIG_DEBUG_KERNEL_DC)
1677         device_create_file(
1678                 adev->ddev->dev,
1679                 &dev_attr_s3_debug);
1680 #endif
1681
1682         return 0;
1683 }
1684
1685 static bool modeset_required(struct drm_crtc_state *crtc_state,
1686                              struct dc_stream_state *new_stream,
1687                              struct dc_stream_state *old_stream)
1688 {
1689         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1690                 return false;
1691
1692         if (!crtc_state->enable)
1693                 return false;
1694
1695         return crtc_state->active;
1696 }
1697
1698 static bool modereset_required(struct drm_crtc_state *crtc_state)
1699 {
1700         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1701                 return false;
1702
1703         return !crtc_state->enable || !crtc_state->active;
1704 }
1705
/* drm_encoder_funcs.destroy callback: unregister the encoder from the DRM
 * core, then release its memory (the encoder was kzalloc'd at creation). */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1711
/* Encoder vtable: only destruction is needed, everything else is atomic. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
1715
1716 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1717                                         struct dc_plane_state *plane_state)
1718 {
1719         plane_state->src_rect.x = state->src_x >> 16;
1720         plane_state->src_rect.y = state->src_y >> 16;
1721         /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1722         plane_state->src_rect.width = state->src_w >> 16;
1723
1724         if (plane_state->src_rect.width == 0)
1725                 return false;
1726
1727         plane_state->src_rect.height = state->src_h >> 16;
1728         if (plane_state->src_rect.height == 0)
1729                 return false;
1730
1731         plane_state->dst_rect.x = state->crtc_x;
1732         plane_state->dst_rect.y = state->crtc_y;
1733
1734         if (state->crtc_w == 0)
1735                 return false;
1736
1737         plane_state->dst_rect.width = state->crtc_w;
1738
1739         if (state->crtc_h == 0)
1740                 return false;
1741
1742         plane_state->dst_rect.height = state->crtc_h;
1743
1744         plane_state->clip_rect = plane_state->dst_rect;
1745
1746         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1747         case DRM_MODE_ROTATE_0:
1748                 plane_state->rotation = ROTATION_ANGLE_0;
1749                 break;
1750         case DRM_MODE_ROTATE_90:
1751                 plane_state->rotation = ROTATION_ANGLE_90;
1752                 break;
1753         case DRM_MODE_ROTATE_180:
1754                 plane_state->rotation = ROTATION_ANGLE_180;
1755                 break;
1756         case DRM_MODE_ROTATE_270:
1757                 plane_state->rotation = ROTATION_ANGLE_270;
1758                 break;
1759         default:
1760                 plane_state->rotation = ROTATION_ANGLE_0;
1761                 break;
1762         }
1763
1764         return true;
1765 }
1766 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1767                        uint64_t *tiling_flags)
1768 {
1769         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1770         int r = amdgpu_bo_reserve(rbo, false);
1771
1772         if (unlikely(r)) {
1773                 // Don't show error msg. when return -ERESTARTSYS
1774                 if (r != -ERESTARTSYS)
1775                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1776                 return r;
1777         }
1778
1779         if (tiling_flags)
1780                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1781
1782         amdgpu_bo_unreserve(rbo);
1783
1784         return r;
1785 }
1786
/*
 * Populate a DC plane state (pixel format, plane sizes, tiling info) from an
 * amdgpu framebuffer.
 *
 * Returns 0 on success, a negative errno if the BO cannot be reserved or the
 * framebuffer uses a pixel format DC does not support.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb)
{
	uint64_t tiling_flags;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	/* Reads the backing BO's tiling flags (reserves/unreserves the BO). */
	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags);

	if (ret)
		return ret;

	/* Map the DRM fourcc onto the DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	/* NOTE(review): NV21/NV12 appear cross-mapped (NV21 is normally
	 * Y/CrCb, NV12 Y/CbCr) — confirm against the DC format enums before
	 * changing; this matches the rest of the driver as-is. */
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Graphics (RGB/palette) surface: one plane. */
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		/* Pitch in pixels, not bytes. */
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		/* Video (semi-planar 4:2:0) surface: luma plane plus a
		 * half-pitch chroma plane. Width is aligned to 64 —
		 * presumably a HW pitch requirement; TODO confirm. */
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	/* Vega10/Raven use the GFX9 addressing scheme instead. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}
1928
1929 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1930                                        struct dc_plane_state *plane_state)
1931 {
1932         int i;
1933         struct dc_gamma *gamma;
1934         struct drm_color_lut *lut =
1935                         (struct drm_color_lut *) crtc_state->gamma_lut->data;
1936
1937         gamma = dc_create_gamma();
1938
1939         if (gamma == NULL) {
1940                 WARN_ON(1);
1941                 return;
1942         }
1943
1944         gamma->type = GAMMA_RGB_256;
1945         gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1946         for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1947                 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1948                 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1949                 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1950         }
1951
1952         plane_state->gamma_correction = gamma;
1953 }
1954
1955 static int fill_plane_attributes(struct amdgpu_device *adev,
1956                                  struct dc_plane_state *dc_plane_state,
1957                                  struct drm_plane_state *plane_state,
1958                                  struct drm_crtc_state *crtc_state)
1959 {
1960         const struct amdgpu_framebuffer *amdgpu_fb =
1961                 to_amdgpu_framebuffer(plane_state->fb);
1962         const struct drm_crtc *crtc = plane_state->crtc;
1963         struct dc_transfer_func *input_tf;
1964         int ret = 0;
1965
1966         if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
1967                 return -EINVAL;
1968
1969         ret = fill_plane_attributes_from_fb(
1970                 crtc->dev->dev_private,
1971                 dc_plane_state,
1972                 amdgpu_fb);
1973
1974         if (ret)
1975                 return ret;
1976
1977         input_tf = dc_create_transfer_func();
1978
1979         if (input_tf == NULL)
1980                 return -ENOMEM;
1981
1982         input_tf->type = TF_TYPE_PREDEFINED;
1983         input_tf->tf = TRANSFER_FUNCTION_SRGB;
1984
1985         dc_plane_state->in_transfer_func = input_tf;
1986
1987         /* In case of gamma set, update gamma value */
1988         if (crtc_state->gamma_lut)
1989                 fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
1990
1991         return ret;
1992 }
1993
1994 /*****************************************************************************/
1995
1996 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
1997                                            const struct dm_connector_state *dm_state,
1998                                            struct dc_stream_state *stream)
1999 {
2000         enum amdgpu_rmx_type rmx_type;
2001
2002         struct rect src = { 0 }; /* viewport in composition space*/
2003         struct rect dst = { 0 }; /* stream addressable area */
2004
2005         /* no mode. nothing to be done */
2006         if (!mode)
2007                 return;
2008
2009         /* Full screen scaling by default */
2010         src.width = mode->hdisplay;
2011         src.height = mode->vdisplay;
2012         dst.width = stream->timing.h_addressable;
2013         dst.height = stream->timing.v_addressable;
2014
2015         rmx_type = dm_state->scaling;
2016         if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2017                 if (src.width * dst.height <
2018                                 src.height * dst.width) {
2019                         /* height needs less upscaling/more downscaling */
2020                         dst.width = src.width *
2021                                         dst.height / src.height;
2022                 } else {
2023                         /* width needs less upscaling/more downscaling */
2024                         dst.height = src.height *
2025                                         dst.width / src.width;
2026                 }
2027         } else if (rmx_type == RMX_CENTER) {
2028                 dst = src;
2029         }
2030
2031         dst.x = (stream->timing.h_addressable - dst.width) / 2;
2032         dst.y = (stream->timing.v_addressable - dst.height) / 2;
2033
2034         if (dm_state->underscan_enable) {
2035                 dst.x += dm_state->underscan_hborder / 2;
2036                 dst.y += dm_state->underscan_vborder / 2;
2037                 dst.width -= dm_state->underscan_hborder;
2038                 dst.height -= dm_state->underscan_vborder;
2039         }
2040
2041         stream->src = src;
2042         stream->dst = dst;
2043
2044         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2045                         dst.x, dst.y, dst.width, dst.height);
2046
2047 }
2048
2049 static enum dc_color_depth
2050 convert_color_depth_from_display_info(const struct drm_connector *connector)
2051 {
2052         uint32_t bpc = connector->display_info.bpc;
2053
2054         /* Limited color depth to 8bit
2055          * TODO: Still need to handle deep color
2056          */
2057         if (bpc > 8)
2058                 bpc = 8;
2059
2060         switch (bpc) {
2061         case 0:
2062                 /* Temporary Work around, DRM don't parse color depth for
2063                  * EDID revision before 1.4
2064                  * TODO: Fix edid parsing
2065                  */
2066                 return COLOR_DEPTH_888;
2067         case 6:
2068                 return COLOR_DEPTH_666;
2069         case 8:
2070                 return COLOR_DEPTH_888;
2071         case 10:
2072                 return COLOR_DEPTH_101010;
2073         case 12:
2074                 return COLOR_DEPTH_121212;
2075         case 14:
2076                 return COLOR_DEPTH_141414;
2077         case 16:
2078                 return COLOR_DEPTH_161616;
2079         default:
2080                 return COLOR_DEPTH_UNDEFINED;
2081         }
2082 }
2083
2084 static enum dc_aspect_ratio
2085 get_aspect_ratio(const struct drm_display_mode *mode_in)
2086 {
2087         int32_t width = mode_in->crtc_hdisplay * 9;
2088         int32_t height = mode_in->crtc_vdisplay * 16;
2089
2090         if ((width - height) < 10 && (width - height) > -10)
2091                 return ASPECT_RATIO_16_9;
2092         else
2093                 return ASPECT_RATIO_4_3;
2094 }
2095
2096 static enum dc_color_space
2097 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2098 {
2099         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2100
2101         switch (dc_crtc_timing->pixel_encoding) {
2102         case PIXEL_ENCODING_YCBCR422:
2103         case PIXEL_ENCODING_YCBCR444:
2104         case PIXEL_ENCODING_YCBCR420:
2105         {
2106                 /*
2107                  * 27030khz is the separation point between HDTV and SDTV
2108                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2109                  * respectively
2110                  */
2111                 if (dc_crtc_timing->pix_clk_khz > 27030) {
2112                         if (dc_crtc_timing->flags.Y_ONLY)
2113                                 color_space =
2114                                         COLOR_SPACE_YCBCR709_LIMITED;
2115                         else
2116                                 color_space = COLOR_SPACE_YCBCR709;
2117                 } else {
2118                         if (dc_crtc_timing->flags.Y_ONLY)
2119                                 color_space =
2120                                         COLOR_SPACE_YCBCR601_LIMITED;
2121                         else
2122                                 color_space = COLOR_SPACE_YCBCR601;
2123                 }
2124
2125         }
2126         break;
2127         case PIXEL_ENCODING_RGB:
2128                 color_space = COLOR_SPACE_SRGB;
2129                 break;
2130
2131         default:
2132                 WARN_ON(1);
2133                 break;
2134         }
2135
2136         return color_space;
2137 }
2138
2139 /*****************************************************************************/
2140
2141 static void
2142 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2143                                              const struct drm_display_mode *mode_in,
2144                                              const struct drm_connector *connector)
2145 {
2146         struct dc_crtc_timing *timing_out = &stream->timing;
2147         struct dc_transfer_func *tf = dc_create_transfer_func();
2148
2149         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2150
2151         timing_out->h_border_left = 0;
2152         timing_out->h_border_right = 0;
2153         timing_out->v_border_top = 0;
2154         timing_out->v_border_bottom = 0;
2155         /* TODO: un-hardcode */
2156
2157         if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2158                         && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2159                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2160         else
2161                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2162
2163         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2164         timing_out->display_color_depth = convert_color_depth_from_display_info(
2165                         connector);
2166         timing_out->scan_type = SCANNING_TYPE_NODATA;
2167         timing_out->hdmi_vic = 0;
2168         timing_out->vic = drm_match_cea_mode(mode_in);
2169
2170         timing_out->h_addressable = mode_in->crtc_hdisplay;
2171         timing_out->h_total = mode_in->crtc_htotal;
2172         timing_out->h_sync_width =
2173                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2174         timing_out->h_front_porch =
2175                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2176         timing_out->v_total = mode_in->crtc_vtotal;
2177         timing_out->v_addressable = mode_in->crtc_vdisplay;
2178         timing_out->v_front_porch =
2179                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2180         timing_out->v_sync_width =
2181                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2182         timing_out->pix_clk_khz = mode_in->crtc_clock;
2183         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2184         if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2185                 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2186         if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2187                 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2188
2189         stream->output_color_space = get_output_color_space(timing_out);
2190
2191         tf->type = TF_TYPE_PREDEFINED;
2192         tf->tf = TRANSFER_FUNCTION_SRGB;
2193         stream->out_transfer_func = tf;
2194 }
2195
2196 static void fill_audio_info(struct audio_info *audio_info,
2197                             const struct drm_connector *drm_connector,
2198                             const struct dc_sink *dc_sink)
2199 {
2200         int i = 0;
2201         int cea_revision = 0;
2202         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2203
2204         audio_info->manufacture_id = edid_caps->manufacturer_id;
2205         audio_info->product_id = edid_caps->product_id;
2206
2207         cea_revision = drm_connector->display_info.cea_rev;
2208
2209         strncpy(audio_info->display_name,
2210                 edid_caps->display_name,
2211                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2212
2213         if (cea_revision >= 3) {
2214                 audio_info->mode_count = edid_caps->audio_mode_count;
2215
2216                 for (i = 0; i < audio_info->mode_count; ++i) {
2217                         audio_info->modes[i].format_code =
2218                                         (enum audio_format_code)
2219                                         (edid_caps->audio_modes[i].format_code);
2220                         audio_info->modes[i].channel_count =
2221                                         edid_caps->audio_modes[i].channel_count;
2222                         audio_info->modes[i].sample_rates.all =
2223                                         edid_caps->audio_modes[i].sample_rate;
2224                         audio_info->modes[i].sample_size =
2225                                         edid_caps->audio_modes[i].sample_size;
2226                 }
2227         }
2228
2229         audio_info->flags.all = edid_caps->speaker_flags;
2230
2231         /* TODO: We only check for the progressive mode, check for interlace mode too */
2232         if (drm_connector->latency_present[0]) {
2233                 audio_info->video_latency = drm_connector->video_latency[0];
2234                 audio_info->audio_latency = drm_connector->audio_latency[0];
2235         }
2236
2237         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2238
2239 }
2240
2241 static void
2242 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2243                                       struct drm_display_mode *dst_mode)
2244 {
2245         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2246         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2247         dst_mode->crtc_clock = src_mode->crtc_clock;
2248         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2249         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2250         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2251         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2252         dst_mode->crtc_htotal = src_mode->crtc_htotal;
2253         dst_mode->crtc_hskew = src_mode->crtc_hskew;
2254         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2255         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2256         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2257         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2258         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2259 }
2260
2261 static void
2262 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2263                                         const struct drm_display_mode *native_mode,
2264                                         bool scale_enabled)
2265 {
2266         if (scale_enabled) {
2267                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2268         } else if (native_mode->clock == drm_mode->clock &&
2269                         native_mode->htotal == drm_mode->htotal &&
2270                         native_mode->vtotal == drm_mode->vtotal) {
2271                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2272         } else {
2273                 /* no scaling nor amdgpu inserted, no need to patch */
2274         }
2275 }
2276
2277 static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
2278 {
2279         struct dc_sink *sink = NULL;
2280         struct dc_sink_init_data sink_init_data = { 0 };
2281
2282         sink_init_data.link = aconnector->dc_link;
2283         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2284
2285         sink = dc_sink_create(&sink_init_data);
2286         if (!sink) {
2287                 DRM_ERROR("Failed to create sink!\n");
2288                 return -ENOMEM;
2289         }
2290
2291         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2292         aconnector->fake_enable = true;
2293
2294         aconnector->dc_sink = sink;
2295         aconnector->dc_link->local_sink = sink;
2296
2297         return 0;
2298 }
2299
2300 static void set_multisync_trigger_params(
2301                 struct dc_stream_state *stream)
2302 {
2303         if (stream->triggered_crtc_reset.enabled) {
2304                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2305                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2306         }
2307 }
2308
2309 static void set_master_stream(struct dc_stream_state *stream_set[],
2310                               int stream_count)
2311 {
2312         int j, highest_rfr = 0, master_stream = 0;
2313
2314         for (j = 0;  j < stream_count; j++) {
2315                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2316                         int refresh_rate = 0;
2317
2318                         refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2319                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2320                         if (refresh_rate > highest_rfr) {
2321                                 highest_rfr = refresh_rate;
2322                                 master_stream = j;
2323                         }
2324                 }
2325         }
2326         for (j = 0;  j < stream_count; j++) {
2327                 if (stream_set[j] && j != master_stream)
2328                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2329         }
2330 }
2331
2332 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2333 {
2334         int i = 0;
2335
2336         if (context->stream_count < 2)
2337                 return;
2338         for (i = 0; i < context->stream_count ; i++) {
2339                 if (!context->streams[i])
2340                         continue;
2341                 /* TODO: add a function to read AMD VSDB bits and will set
2342                  * crtc_sync_master.multi_sync_enabled flag
2343                  * For now its set to false
2344                  */
2345                 set_multisync_trigger_params(context->streams[i]);
2346         }
2347         set_master_stream(context->streams, context->stream_count);
2348 }
2349
/*
 * Build a DC stream for the given connector/mode/connector-state triple.
 *
 * Creates a fake sink (or kicks off MST sink creation) when the connector
 * has no dc_sink yet, then fills the stream timing, scaling and audio info.
 *
 * Returns the new stream, or NULL on any failure (all error labels below
 * fall through to the same `return stream` with stream still NULL; the
 * distinct label names only document which step failed).
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	/* Local copy so the caller's mode is never modified. */
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Create dc_sink when necessary to MST
		 * Don't apply fake_sink to MST
		 */
		if (aconnector->mst_port) {
			dm_dp_mst_dc_sink_create(drm_connector);
			goto mst_dc_sink_create_done;
		}

		if (create_fake_sink(aconnector))
			goto stream_create_fail;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	/* Search for the preferred mode; fall back to the first listed one. */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode and the mode list may not be populated in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
mst_dc_sink_create_done:
	return stream;
}
2435
/*
 * drm_crtc_funcs.destroy: release DRM core resources and free the CRTC
 * allocation made in amdgpu_dm_crtc_init().
 * NOTE(review): kfree() on the embedded drm_crtc assumes it is the first
 * member of struct amdgpu_crtc — confirm against the struct layout.
 */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2441
2442 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2443                                   struct drm_crtc_state *state)
2444 {
2445         struct dm_crtc_state *cur = to_dm_crtc_state(state);
2446
2447         /* TODO Destroy dc_stream objects are stream object is flattened */
2448         if (cur->stream)
2449                 dc_stream_release(cur->stream);
2450
2451
2452         __drm_atomic_helper_crtc_destroy_state(state);
2453
2454
2455         kfree(state);
2456 }
2457
2458 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2459 {
2460         struct dm_crtc_state *state;
2461
2462         if (crtc->state)
2463                 dm_crtc_destroy_state(crtc, crtc->state);
2464
2465         state = kzalloc(sizeof(*state), GFP_KERNEL);
2466         if (WARN_ON(!state))
2467                 return;
2468
2469         crtc->state = &state->base;
2470         crtc->state->crtc = crtc;
2471
2472 }
2473
2474 static struct drm_crtc_state *
2475 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2476 {
2477         struct dm_crtc_state *state, *cur;
2478
2479         cur = to_dm_crtc_state(crtc->state);
2480
2481         if (WARN_ON(!crtc->state))
2482                 return NULL;
2483
2484         state = kzalloc(sizeof(*state), GFP_KERNEL);
2485         if (!state)
2486                 return NULL;
2487
2488         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2489
2490         if (cur->stream) {
2491                 state->stream = cur->stream;
2492                 dc_stream_retain(state->stream);
2493         }
2494
2495         /* TODO Duplicate dc_stream after objects are stream object is flattened */
2496
2497         return &state->base;
2498 }
2499
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
};
2510
/*
 * drm_connector_funcs.detect: report connection status from the cached
 * dc_sink (or the user's force setting) without touching the hardware.
 */
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
2531
2532 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2533                                             struct drm_connector_state *connector_state,
2534                                             struct drm_property *property,
2535                                             uint64_t val)
2536 {
2537         struct drm_device *dev = connector->dev;
2538         struct amdgpu_device *adev = dev->dev_private;
2539         struct dm_connector_state *dm_old_state =
2540                 to_dm_connector_state(connector->state);
2541         struct dm_connector_state *dm_new_state =
2542                 to_dm_connector_state(connector_state);
2543
2544         int ret = -EINVAL;
2545
2546         if (property == dev->mode_config.scaling_mode_property) {
2547                 enum amdgpu_rmx_type rmx_type;
2548
2549                 switch (val) {
2550                 case DRM_MODE_SCALE_CENTER:
2551                         rmx_type = RMX_CENTER;
2552                         break;
2553                 case DRM_MODE_SCALE_ASPECT:
2554                         rmx_type = RMX_ASPECT;
2555                         break;
2556                 case DRM_MODE_SCALE_FULLSCREEN:
2557                         rmx_type = RMX_FULL;
2558                         break;
2559                 case DRM_MODE_SCALE_NONE:
2560                 default:
2561                         rmx_type = RMX_OFF;
2562                         break;
2563                 }
2564
2565                 if (dm_old_state->scaling == rmx_type)
2566                         return 0;
2567
2568                 dm_new_state->scaling = rmx_type;
2569                 ret = 0;
2570         } else if (property == adev->mode_info.underscan_hborder_property) {
2571                 dm_new_state->underscan_hborder = val;
2572                 ret = 0;
2573         } else if (property == adev->mode_info.underscan_vborder_property) {
2574                 dm_new_state->underscan_vborder = val;
2575                 ret = 0;
2576         } else if (property == adev->mode_info.underscan_property) {
2577                 dm_new_state->underscan_enable = val;
2578                 ret = 0;
2579         }
2580
2581         return ret;
2582 }
2583
2584 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2585                                             const struct drm_connector_state *state,
2586                                             struct drm_property *property,
2587                                             uint64_t *val)
2588 {
2589         struct drm_device *dev = connector->dev;
2590         struct amdgpu_device *adev = dev->dev_private;
2591         struct dm_connector_state *dm_state =
2592                 to_dm_connector_state(state);
2593         int ret = -EINVAL;
2594
2595         if (property == dev->mode_config.scaling_mode_property) {
2596                 switch (dm_state->scaling) {
2597                 case RMX_CENTER:
2598                         *val = DRM_MODE_SCALE_CENTER;
2599                         break;
2600                 case RMX_ASPECT:
2601                         *val = DRM_MODE_SCALE_ASPECT;
2602                         break;
2603                 case RMX_FULL:
2604                         *val = DRM_MODE_SCALE_FULLSCREEN;
2605                         break;
2606                 case RMX_OFF:
2607                 default:
2608                         *val = DRM_MODE_SCALE_NONE;
2609                         break;
2610                 }
2611                 ret = 0;
2612         } else if (property == adev->mode_info.underscan_hborder_property) {
2613                 *val = dm_state->underscan_hborder;
2614                 ret = 0;
2615         } else if (property == adev->mode_info.underscan_vborder_property) {
2616                 *val = dm_state->underscan_vborder;
2617                 ret = 0;
2618         } else if (property == adev->mode_info.underscan_property) {
2619                 *val = dm_state->underscan_enable;
2620                 ret = 0;
2621         }
2622         return ret;
2623 }
2624
/*
 * drm_connector_funcs.destroy: unregister and free the connector; for
 * eDP/LVDS links the backlight device is torn down as well.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* NOTE(review): calling the register helper on the destroy
		 * path looks odd; presumably it ensures dm->backlight_dev is
		 * populated for this link before the unregister below —
		 * confirm intent. */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2648
/*
 * drm_connector_funcs.reset: free the current software state and install
 * fresh defaults (no scaling, underscan disabled).
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	/* NOTE(review): kfree() on the container of connector->state assumes
	 * base is the first member of dm_connector_state (so a NULL state
	 * still yields a NULL pointer here) — confirm the struct layout. */
	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}
2668
2669 struct drm_connector_state *
2670 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2671 {
2672         struct dm_connector_state *state =
2673                 to_dm_connector_state(connector->state);
2674
2675         struct dm_connector_state *new_state =
2676                         kmemdup(state, sizeof(*state), GFP_KERNEL);
2677
2678         if (new_state) {
2679                 __drm_atomic_helper_connector_duplicate_state(connector,
2680                                                               &new_state->base);
2681                 return &new_state->base;
2682         }
2683
2684         return NULL;
2685 }
2686
/* Connector vtable; mixes atomic helpers with the dm-specific callbacks above. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2697
2698 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2699 {
2700         int enc_id = connector->encoder_ids[0];
2701         struct drm_mode_object *obj;
2702         struct drm_encoder *encoder;
2703
2704         DRM_DEBUG_DRIVER("Finding the best encoder\n");
2705
2706         /* pick the encoder ids */
2707         if (enc_id) {
2708                 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2709                 if (!obj) {
2710                         DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2711                         return NULL;
2712                 }
2713                 encoder = obj_to_encoder(obj);
2714                 return encoder;
2715         }
2716         DRM_ERROR("No encoder id\n");
2717         return NULL;
2718 }
2719
/* drm_connector_helper_funcs.get_modes: thin file-local wrapper around
 * amdgpu_dm_connector_get_modes(). */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2724
/*
 * Create an emulated (remote) dc sink from the EDID stored in the
 * connector's edid_blob_ptr property. Without an EDID blob the connector
 * is forced OFF. Used for user-forced connectors (see handle_edid_mgmt).
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the EDID-backed emulated sink with dc. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/* When forced on, prefer a real local sink over the emulated one. */
	if (aconnector->base.force == DRM_FORCE_ON)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
}
2757
/*
 * Prepare a forced connector for modesetting: advertise non-zero DP link
 * capabilities so an initial modeset can happen, then build the emulated
 * EDID sink via create_eml_sink().
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	/* Cast away the const the connector keeps on its dc_link so the
	 * verified link caps can be patched below. */
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
2774
/*
 * drm_connector_helper_funcs.mode_valid: build a throw-away dc stream for
 * the candidate mode and let dc validate it. Interlaced and doublescan
 * modes are rejected outright.
 *
 * Returns MODE_OK when dc accepts the stream, MODE_ERROR otherwise.
 */
int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = dc_create_stream_for_sink(dc_sink);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
		result = MODE_OK;

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
2825
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
2838
/* drm_crtc_helper_funcs.disable: intentionally a no-op; CRTC disable is
 * presumably handled elsewhere in the atomic commit path — confirm. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2842
2843 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2844                                        struct drm_crtc_state *state)
2845 {
2846         struct amdgpu_device *adev = crtc->dev->dev_private;
2847         struct dc *dc = adev->dm.dc;
2848         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2849         int ret = -EINVAL;
2850
2851         if (unlikely(!dm_crtc_state->stream &&
2852                      modeset_required(state, NULL, dm_crtc_state->stream))) {
2853                 WARN_ON(1);
2854                 return ret;
2855         }
2856
2857         /* In some use cases, like reset, no stream  is attached */
2858         if (!dm_crtc_state->stream)
2859                 return 0;
2860
2861         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2862                 return 0;
2863
2864         return ret;
2865 }
2866
/* Accept every mode unchanged; validation happens in atomic_check via
 * dc_validate_stream(). */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
2873
/* Helper vtable attached to each CRTC in amdgpu_dm_crtc_init(). */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
2879
/* drm_encoder_helper_funcs.disable: nothing to do for dm encoders. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2884
/* Encoders impose no additional atomic constraints; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2891
/* Shared helper vtable for all dm encoders (non-static: used elsewhere). */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
2896
2897 static void dm_drm_plane_reset(struct drm_plane *plane)
2898 {
2899         struct dm_plane_state *amdgpu_state = NULL;
2900
2901         if (plane->state)
2902                 plane->funcs->atomic_destroy_state(plane, plane->state);
2903
2904         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2905         WARN_ON(amdgpu_state == NULL);
2906         
2907         if (amdgpu_state) {
2908                 plane->state = &amdgpu_state->base;
2909                 plane->state->plane = plane;
2910                 plane->state->rotation = DRM_MODE_ROTATE_0;
2911         }
2912 }
2913
2914 static struct drm_plane_state *
2915 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2916 {
2917         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2918
2919         old_dm_plane_state = to_dm_plane_state(plane->state);
2920         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2921         if (!dm_plane_state)
2922                 return NULL;
2923
2924         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2925
2926         if (old_dm_plane_state->dc_state) {
2927                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2928                 dc_plane_state_retain(dm_plane_state->dc_state);
2929         }
2930
2931         return &dm_plane_state->base;
2932 }
2933
2934 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2935                                 struct drm_plane_state *state)
2936 {
2937         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2938
2939         if (dm_plane_state->dc_state)
2940                 dc_plane_state_release(dm_plane_state->dc_state);
2941
2942         drm_atomic_helper_plane_destroy_state(plane, state);
2943 }
2944
/* Plane vtable: atomic helpers plus dm-specific state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
2953
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer BO into VRAM and
 * record the resulting GPU address in the dc plane state (split into
 * luma/chroma addresses for video surfaces).
 *
 * Returns 0 on success or a negative errno from reserve/pin.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	int r;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* Pin into VRAM; afb->address receives the GPU address. */
	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);


	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	/* Hold a BO reference for as long as the fb stays pinned. */
	amdgpu_bo_ref(rbo);

	/* Only (re)program addresses when this state carries a dc plane
	 * that differs from the currently committed one. */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Video surface: the chroma plane follows the
			 * 64-pixel-aligned luma plane in memory. */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because fb doesn't exist in s3.
	 * In 4.10 kernel this code should be removed and amdgpu_device_suspend
	 * code touching frame buffers should be avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
3029
3030 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3031                                        struct drm_plane_state *old_state)
3032 {
3033         struct amdgpu_bo *rbo;
3034         struct amdgpu_framebuffer *afb;
3035         int r;
3036
3037         if (!old_state->fb)
3038                 return;
3039
3040         afb = to_amdgpu_framebuffer(old_state->fb);
3041         rbo = gem_to_amdgpu_bo(afb->obj);
3042         r = amdgpu_bo_reserve(rbo, false);
3043         if (unlikely(r)) {
3044                 DRM_ERROR("failed to reserve rbo before unpin\n");
3045                 return;
3046         }
3047
3048         amdgpu_bo_unpin(rbo);
3049         amdgpu_bo_unreserve(rbo);
3050         amdgpu_bo_unref(&rbo);
3051 }
3052
3053 static int dm_plane_atomic_check(struct drm_plane *plane,
3054                                  struct drm_plane_state *state)
3055 {
3056         struct amdgpu_device *adev = plane->dev->dev_private;
3057         struct dc *dc = adev->dm.dc;
3058         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3059
3060         if (!dm_plane_state->dc_state)
3061                 return 0;
3062
3063         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3064                 return 0;
3065
3066         return -EINVAL;
3067 }
3068
/* FB pin/unpin and dc-side validation hooks for planes. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
3074
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};

/* Formats exposed on overlay planes (see amdgpu_dm_plane_init). */
static const uint32_t yuv_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};

/* The cursor plane supports a single format. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
3100
3101 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3102                                 struct amdgpu_plane *aplane,
3103                                 unsigned long possible_crtcs)
3104 {
3105         int res = -EPERM;
3106
3107         switch (aplane->base.type) {
3108         case DRM_PLANE_TYPE_PRIMARY:
3109                 aplane->base.format_default = true;
3110
3111                 res = drm_universal_plane_init(
3112                                 dm->adev->ddev,
3113                                 &aplane->base,
3114                                 possible_crtcs,
3115                                 &dm_plane_funcs,
3116                                 rgb_formats,
3117                                 ARRAY_SIZE(rgb_formats),
3118                                 NULL, aplane->base.type, NULL);
3119                 break;
3120         case DRM_PLANE_TYPE_OVERLAY:
3121                 res = drm_universal_plane_init(
3122                                 dm->adev->ddev,
3123                                 &aplane->base,
3124                                 possible_crtcs,
3125                                 &dm_plane_funcs,
3126                                 yuv_formats,
3127                                 ARRAY_SIZE(yuv_formats),
3128                                 NULL, aplane->base.type, NULL);
3129                 break;
3130         case DRM_PLANE_TYPE_CURSOR:
3131                 res = drm_universal_plane_init(
3132                                 dm->adev->ddev,
3133                                 &aplane->base,
3134                                 possible_crtcs,
3135                                 &dm_plane_funcs,
3136                                 cursor_formats,
3137                                 ARRAY_SIZE(cursor_formats),
3138                                 NULL, aplane->base.type, NULL);
3139                 break;
3140         }
3141
3142         drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3143
3144         /* Create (reset) the plane state */
3145         if (aplane->base.funcs->reset)
3146                 aplane->base.funcs->reset(&aplane->base);
3147
3148
3149         return res;
3150 }
3151
3152 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3153                                struct drm_plane *plane,
3154                                uint32_t crtc_index)
3155 {
3156         struct amdgpu_crtc *acrtc = NULL;
3157         struct amdgpu_plane *cursor_plane;
3158
3159         int res = -ENOMEM;
3160
3161         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3162         if (!cursor_plane)
3163                 goto fail;
3164
3165         cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3166         res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3167
3168         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3169         if (!acrtc)
3170                 goto fail;
3171
3172         res = drm_crtc_init_with_planes(
3173                         dm->ddev,
3174                         &acrtc->base,
3175                         plane,
3176                         &cursor_plane->base,
3177                         &amdgpu_dm_crtc_funcs, NULL);
3178
3179         if (res)
3180                 goto fail;
3181
3182         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3183
3184         /* Create (reset) the plane state */
3185         if (acrtc->base.funcs->reset)
3186                 acrtc->base.funcs->reset(&acrtc->base);
3187
3188         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3189         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3190
3191         acrtc->crtc_id = crtc_index;
3192         acrtc->base.enabled = false;
3193
3194         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3195         drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3196
3197         return 0;
3198
3199 fail:
3200         kfree(acrtc);
3201         kfree(cursor_plane);
3202         return res;
3203 }
3204
3205
3206 static int to_drm_connector_type(enum signal_type st)
3207 {
3208         switch (st) {
3209         case SIGNAL_TYPE_HDMI_TYPE_A:
3210                 return DRM_MODE_CONNECTOR_HDMIA;
3211         case SIGNAL_TYPE_EDP:
3212                 return DRM_MODE_CONNECTOR_eDP;
3213         case SIGNAL_TYPE_RGB:
3214                 return DRM_MODE_CONNECTOR_VGA;
3215         case SIGNAL_TYPE_DISPLAY_PORT:
3216         case SIGNAL_TYPE_DISPLAY_PORT_MST:
3217                 return DRM_MODE_CONNECTOR_DisplayPort;
3218         case SIGNAL_TYPE_DVI_DUAL_LINK:
3219         case SIGNAL_TYPE_DVI_SINGLE_LINK:
3220                 return DRM_MODE_CONNECTOR_DVID;
3221         case SIGNAL_TYPE_VIRTUAL:
3222                 return DRM_MODE_CONNECTOR_VIRTUAL;
3223
3224         default:
3225                 return DRM_MODE_CONNECTOR_Unknown;
3226         }
3227 }
3228
/* Cache the connector's preferred probed mode as the encoder's native
 * mode. native_mode.clock is reset to 0 first, so a connector without a
 * usable preferred mode ends up with an invalid (clock == 0) native mode.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
        const struct drm_connector_helper_funcs *helper =
                connector->helper_private;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        encoder = helper->best_encoder(connector);

        if (encoder == NULL)
                return;

        amdgpu_encoder = to_amdgpu_encoder(encoder);

        /* Invalidate any previously cached native mode. */
        amdgpu_encoder->native_mode.clock = 0;

        if (!list_empty(&connector->probed_modes)) {
                struct drm_display_mode *preferred_mode = NULL;

                list_for_each_entry(preferred_mode,
                                    &connector->probed_modes,
                                    head) {
                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
                                amdgpu_encoder->native_mode = *preferred_mode;

                        /* NOTE(review): the unconditional break means only
                         * the FIRST probed mode is ever examined —
                         * presumably the list is sorted with the preferred
                         * mode first; confirm before relying on this.
                         */
                        break;
                }

        }
}
3259
3260 static struct drm_display_mode *
3261 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3262                              char *name,
3263                              int hdisplay, int vdisplay)
3264 {
3265         struct drm_device *dev = encoder->dev;
3266         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3267         struct drm_display_mode *mode = NULL;
3268         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3269
3270         mode = drm_mode_duplicate(dev, native_mode);
3271
3272         if (mode == NULL)
3273                 return NULL;
3274
3275         mode->hdisplay = hdisplay;
3276         mode->vdisplay = vdisplay;
3277         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3278         strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3279
3280         return mode;
3281
3282 }
3283
3284 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3285                                                  struct drm_connector *connector)
3286 {
3287         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3288         struct drm_display_mode *mode = NULL;
3289         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3290         struct amdgpu_dm_connector *amdgpu_dm_connector =
3291                                 to_amdgpu_dm_connector(connector);
3292         int i;
3293         int n;
3294         struct mode_size {
3295                 char name[DRM_DISPLAY_MODE_LEN];
3296                 int w;
3297                 int h;
3298         } common_modes[] = {
3299                 {  "640x480",  640,  480},
3300                 {  "800x600",  800,  600},
3301                 { "1024x768", 1024,  768},
3302                 { "1280x720", 1280,  720},
3303                 { "1280x800", 1280,  800},
3304                 {"1280x1024", 1280, 1024},
3305                 { "1440x900", 1440,  900},
3306                 {"1680x1050", 1680, 1050},
3307                 {"1600x1200", 1600, 1200},
3308                 {"1920x1080", 1920, 1080},
3309                 {"1920x1200", 1920, 1200}
3310         };
3311
3312         n = ARRAY_SIZE(common_modes);
3313
3314         for (i = 0; i < n; i++) {
3315                 struct drm_display_mode *curmode = NULL;
3316                 bool mode_existed = false;
3317
3318                 if (common_modes[i].w > native_mode->hdisplay ||
3319                     common_modes[i].h > native_mode->vdisplay ||
3320                    (common_modes[i].w == native_mode->hdisplay &&
3321                     common_modes[i].h == native_mode->vdisplay))
3322                         continue;
3323
3324                 list_for_each_entry(curmode, &connector->probed_modes, head) {
3325                         if (common_modes[i].w == curmode->hdisplay &&
3326                             common_modes[i].h == curmode->vdisplay) {
3327                                 mode_existed = true;
3328                                 break;
3329                         }
3330                 }
3331
3332                 if (mode_existed)
3333                         continue;
3334
3335                 mode = amdgpu_dm_create_common_mode(encoder,
3336                                 common_modes[i].name, common_modes[i].w,
3337                                 common_modes[i].h);
3338                 drm_mode_probed_add(connector, mode);
3339                 amdgpu_dm_connector->num_modes++;
3340         }
3341 }
3342
3343 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3344                                               struct edid *edid)
3345 {
3346         struct amdgpu_dm_connector *amdgpu_dm_connector =
3347                         to_amdgpu_dm_connector(connector);
3348
3349         if (edid) {
3350                 /* empty probed_modes */
3351                 INIT_LIST_HEAD(&connector->probed_modes);
3352                 amdgpu_dm_connector->num_modes =
3353                                 drm_add_edid_modes(connector, edid);
3354
3355                 amdgpu_dm_get_native_mode(connector);
3356         } else {
3357                 amdgpu_dm_connector->num_modes = 0;
3358         }
3359 }
3360
3361 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3362 {
3363         const struct drm_connector_helper_funcs *helper =
3364                         connector->helper_private;
3365         struct amdgpu_dm_connector *amdgpu_dm_connector =
3366                         to_amdgpu_dm_connector(connector);
3367         struct drm_encoder *encoder;
3368         struct edid *edid = amdgpu_dm_connector->edid;
3369
3370         encoder = helper->best_encoder(connector);
3371
3372         amdgpu_dm_connector_ddc_get_modes(connector, edid);
3373         amdgpu_dm_connector_add_common_modes(encoder, connector);
3374         return amdgpu_dm_connector->num_modes;
3375 }
3376
3377 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3378                                      struct amdgpu_dm_connector *aconnector,
3379                                      int connector_type,
3380                                      struct dc_link *link,
3381                                      int link_index)
3382 {
3383         struct amdgpu_device *adev = dm->ddev->dev_private;
3384
3385         aconnector->connector_id = link_index;
3386         aconnector->dc_link = link;
3387         aconnector->base.interlace_allowed = false;
3388         aconnector->base.doublescan_allowed = false;
3389         aconnector->base.stereo_allowed = false;
3390         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3391         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3392
3393         mutex_init(&aconnector->hpd_lock);
3394
3395         /* configure support HPD hot plug connector_>polled default value is 0
3396          * which means HPD hot plug not supported
3397          */
3398         switch (connector_type) {
3399         case DRM_MODE_CONNECTOR_HDMIA:
3400                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3401                 break;
3402         case DRM_MODE_CONNECTOR_DisplayPort:
3403                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3404                 break;
3405         case DRM_MODE_CONNECTOR_DVID:
3406                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3407                 break;
3408         default:
3409                 break;
3410         }
3411
3412         drm_object_attach_property(&aconnector->base.base,
3413                                 dm->ddev->mode_config.scaling_mode_property,
3414                                 DRM_MODE_SCALE_NONE);
3415
3416         drm_object_attach_property(&aconnector->base.base,
3417                                 adev->mode_info.underscan_property,
3418                                 UNDERSCAN_OFF);
3419         drm_object_attach_property(&aconnector->base.base,
3420                                 adev->mode_info.underscan_hborder_property,
3421                                 0);
3422         drm_object_attach_property(&aconnector->base.base,
3423                                 adev->mode_info.underscan_vborder_property,
3424                                 0);
3425
3426 }
3427
3428 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3429                               struct i2c_msg *msgs, int num)
3430 {
3431         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3432         struct ddc_service *ddc_service = i2c->ddc_service;
3433         struct i2c_command cmd;
3434         int i;
3435         int result = -EIO;
3436
3437         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3438
3439         if (!cmd.payloads)
3440                 return result;
3441
3442         cmd.number_of_payloads = num;
3443         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3444         cmd.speed = 100;
3445
3446         for (i = 0; i < num; i++) {
3447                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3448                 cmd.payloads[i].address = msgs[i].addr;
3449                 cmd.payloads[i].length = msgs[i].len;
3450                 cmd.payloads[i].data = msgs[i].buf;
3451         }
3452
3453         if (dal_i2caux_submit_i2c_command(
3454                         ddc_service->ctx->i2caux,
3455                         ddc_service->ddc_pin,
3456                         &cmd))
3457                 result = num;
3458
3459         kfree(cmd.payloads);
3460         return result;
3461 }
3462
/* i2c_algorithm.functionality: plain I2C plus emulated SMBus transfers. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
3467
/* Algorithm hooks for the DM-owned i2c adapters built by create_i2c(). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
        .master_xfer = amdgpu_dm_i2c_xfer,
        .functionality = amdgpu_dm_i2c_func,
};
3472
3473 static struct amdgpu_i2c_adapter *
3474 create_i2c(struct ddc_service *ddc_service,
3475            int link_index,
3476            int *res)
3477 {
3478         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3479         struct amdgpu_i2c_adapter *i2c;
3480
3481         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3482         if (!i2c)
3483                 return NULL;
3484         i2c->base.owner = THIS_MODULE;
3485         i2c->base.class = I2C_CLASS_DDC;
3486         i2c->base.dev.parent = &adev->pdev->dev;
3487         i2c->base.algo = &amdgpu_dm_i2c_algo;
3488         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3489         i2c_set_adapdata(&i2c->base, i2c);
3490         i2c->ddc_service = ddc_service;
3491
3492         return i2c;
3493 }
3494
/* Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 *
 * Creates the i2c adapter for the link, initializes the DRM connector,
 * attaches it to @aencoder and, for DP/eDP, sets up MST support and the
 * backlight device. Returns 0 on success, negative errno otherwise.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *aconnector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *aencoder)
{
        int res = 0;
        int connector_type;
        struct dc *dc = dm->dc;
        struct dc_link *link = dc_get_link_at_index(dc, link_index);
        struct amdgpu_i2c_adapter *i2c;

        /* Let the dc_link find its connector, e.g. for HPD handling. */
        link->priv = aconnector;

        DRM_DEBUG_DRIVER("%s()\n", __func__);

        i2c = create_i2c(link->ddc, link->link_index, &res);
        if (!i2c) {
                DRM_ERROR("Failed to create i2c adapter data\n");
                return -ENOMEM;
        }

        aconnector->i2c = i2c;
        res = i2c_add_adapter(&i2c->base);

        if (res) {
                DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
                goto out_free;
        }

        connector_type = to_drm_connector_type(link->connector_signal);

        res = drm_connector_init(
                        dm->ddev,
                        &aconnector->base,
                        &amdgpu_dm_connector_funcs,
                        connector_type);

        /* NOTE(review): on this failure path the already-registered i2c
         * adapter is only kfree'd at out_free; an i2c_del_adapter() call
         * looks missing — confirm against i2c core requirements.
         */
        if (res) {
                DRM_ERROR("connector_init failed\n");
                aconnector->connector_id = -1;
                goto out_free;
        }

        drm_connector_helper_add(
                        &aconnector->base,
                        &amdgpu_dm_connector_helper_funcs);

        /* Create the initial (reset) connector state. */
        if (aconnector->base.funcs->reset)
                aconnector->base.funcs->reset(&aconnector->base);

        amdgpu_dm_connector_init_helper(
                dm,
                aconnector,
                connector_type,
                link,
                link_index);

        drm_mode_connector_attach_encoder(
                &aconnector->base, &aencoder->base);

        drm_connector_register(&aconnector->base);

        /* DP and eDP links may drive MST topologies. */
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
                amdgpu_dm_initialize_dp_connector(dm, aconnector);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        /* NOTE: this currently will create backlight device even if a panel
         * is not connected to the eDP/LVDS connector.
         *
         * This is less than ideal but we don't have sink information at this
         * stage since detection happens after. We can't do detection earlier
         * since MST detection needs connectors to be created first.
         */
        if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
                /* Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif

out_free:
        if (res) {
                kfree(i2c);
                aconnector->i2c = NULL;
        }
        return res;
}
3593
3594 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3595 {
3596         switch (adev->mode_info.num_crtc) {
3597         case 1:
3598                 return 0x1;
3599         case 2:
3600                 return 0x3;
3601         case 3:
3602                 return 0x7;
3603         case 4:
3604                 return 0xf;
3605         case 5:
3606                 return 0x1f;
3607         case 6:
3608         default:
3609                 return 0x3f;
3610         }
3611 }
3612
3613 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3614                                   struct amdgpu_encoder *aencoder,
3615                                   uint32_t link_index)
3616 {
3617         struct amdgpu_device *adev = dev->dev_private;
3618
3619         int res = drm_encoder_init(dev,
3620                                    &aencoder->base,
3621                                    &amdgpu_dm_encoder_funcs,
3622                                    DRM_MODE_ENCODER_TMDS,
3623                                    NULL);
3624
3625         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3626
3627         if (!res)
3628                 aencoder->encoder_id = link_index;
3629         else
3630                 aencoder->encoder_id = -1;
3631
3632         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3633
3634         return res;
3635 }
3636
3637 static void manage_dm_interrupts(struct amdgpu_device *adev,
3638                                  struct amdgpu_crtc *acrtc,
3639                                  bool enable)
3640 {
3641         /*
3642          * this is not correct translation but will work as soon as VBLANK
3643          * constant is the same as PFLIP
3644          */
3645         int irq_type =
3646                 amdgpu_crtc_idx_to_irq_type(
3647                         adev,
3648                         acrtc->crtc_id);
3649
3650         if (enable) {
3651                 drm_crtc_vblank_on(&acrtc->base);
3652                 amdgpu_irq_get(
3653                         adev,
3654                         &adev->pageflip_irq,
3655                         irq_type);
3656         } else {
3657
3658                 amdgpu_irq_put(
3659                         adev,
3660                         &adev->pageflip_irq,
3661                         irq_type);
3662                 drm_crtc_vblank_off(&acrtc->base);
3663         }
3664 }
3665
/* Report whether the scaling / underscan configuration differs between
 * the old and new connector states in a way that needs a stream update.
 */
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
                           const struct dm_connector_state *old_dm_state)
{
        if (dm_state->scaling != old_dm_state->scaling)
                return true;
        /* Underscan toggled off: only matters if borders were in use. */
        if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
                if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
                        return true;
        } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
                /* Underscan toggled on with non-zero borders. */
                if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
                        return true;
        } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
                   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
                /* Same enable state but border sizes changed. */
                return true;
        /* NOTE(review): the '&&' in the toggle checks means a change with
         * only ONE non-zero border is ignored — verify this is intended
         * (an '||' would also catch single-axis underscan).
         */
        return false;
}
3683
/* Tear down per-CRTC stream bookkeeping when a stream is removed:
 * detach it from the freesync module and mark the CRTC disabled.
 */
static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
{
        /* this is the update mode case */
        if (adev->dm.freesync_module)
                mod_freesync_remove_stream(adev->dm.freesync_module, stream);

        /* -1 means "no output timing generator assigned". */
        acrtc->otg_inst = -1;
        acrtc->enabled = false;
}
3695
3696 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3697                                struct dc_cursor_position *position)
3698 {
3699         struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3700         int x, y;
3701         int xorigin = 0, yorigin = 0;
3702
3703         if (!crtc || !plane->state->fb) {
3704                 position->enable = false;
3705                 position->x = 0;
3706                 position->y = 0;
3707                 return 0;
3708         }
3709
3710         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3711             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3712                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3713                           __func__,
3714                           plane->state->crtc_w,
3715                           plane->state->crtc_h);
3716                 return -EINVAL;
3717         }
3718
3719         x = plane->state->crtc_x;
3720         y = plane->state->crtc_y;
3721         /* avivo cursor are offset into the total surface */
3722         x += crtc->primary->state->src_x >> 16;
3723         y += crtc->primary->state->src_y >> 16;
3724         if (x < 0) {
3725                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3726                 x = 0;
3727         }
3728         if (y < 0) {
3729                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3730                 y = 0;
3731         }
3732         position->enable = true;
3733         position->x = x;
3734         position->y = y;
3735         position->x_hotspot = xorigin;
3736         position->y_hotspot = yorigin;
3737
3738         return 0;
3739 }
3740
/* Push the cursor plane's current position and attributes down to DC,
 * or disable the cursor if the plane lost its framebuffer.
 */
static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        /* Cursor being disabled: fall back to the old state's CRTC. */
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        struct dc_cursor_position position;
        struct dc_cursor_attributes attributes;
        int ret;

        /* Nothing to do if the cursor had no fb before and has none now. */
        if (!plane->state->fb && !old_plane_state->fb)
                return;

        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
                         __func__,
                         amdgpu_crtc->crtc_id,
                         plane->state->crtc_w,
                         plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream)
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                return;
        }

        /* position.enable implies crtc != NULL, hence crtc_state != NULL
         * for the dereferences below (see get_cursor_position()).
         */
        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

        attributes.pitch = attributes.width;

        if (crtc_state->stream) {
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                         &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
        }
}
3797
/* Take ownership of the CRTC's pending pageflip event so the flip-done
 * interrupt handler can deliver it. Caller must hold dev->event_lock.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

        assert_spin_locked(&acrtc->base.dev->event_lock);
        /* A previous flip must have completed (event already consumed). */
        WARN_ON(acrtc->event);

        acrtc->event = acrtc->base.state->event;

        /* Set the flip status */
        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;

        DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
                                                 acrtc->crtc_id);
}
3815
3816 /*
3817  * Executes flip
3818  *
3819  * Waits on all BO's fences and for proper vblank count
3820  */
3821 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3822                               struct drm_framebuffer *fb,
3823                               uint32_t target,
3824                               struct dc_state *state)
3825 {
3826         unsigned long flags;
3827         uint32_t target_vblank;
3828         int r, vpos, hpos;
3829         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3830         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3831         struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
3832         struct amdgpu_device *adev = crtc->dev->dev_private;
3833         bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3834         struct dc_flip_addrs addr = { {0} };
3835         /* TODO eliminate or rename surface_update */
3836         struct dc_surface_update surface_updates[1] = { {0} };
3837         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3838
3839
3840         /* Prepare wait for target vblank early - before the fence-waits */
3841         target_vblank = target - drm_crtc_vblank_count(crtc) +
3842                         amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3843
3844         /* TODO This might fail and hence better not used, wait
3845          * explicitly on fences instead
3846          * and in general should be called for
3847          * blocking commit to as per framework helpers
3848          */
3849         r = amdgpu_bo_reserve(abo, true);
3850         if (unlikely(r != 0)) {
3851                 DRM_ERROR("failed to reserve buffer before flip\n");
3852                 WARN_ON(1);
3853         }
3854
3855         /* Wait for all fences on this FB */
3856         WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3857                                                                     MAX_SCHEDULE_TIMEOUT) < 0);
3858
3859         amdgpu_bo_unreserve(abo);
3860
3861         /* Wait until we're out of the vertical blank period before the one
3862          * targeted by the flip
3863          */
3864         while ((acrtc->enabled &&
3865                 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
3866                                         &vpos, &hpos, NULL, NULL,
3867                                         &crtc->hwmode)
3868                  & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3869                 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3870                 (int)(target_vblank -
3871                   amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3872                 usleep_range(1000, 1100);
3873         }
3874
3875         /* Flip */
3876         spin_lock_irqsave(&crtc->dev->event_lock, flags);
3877         /* update crtc fb */
3878         crtc->primary->fb = fb;
3879
3880         WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3881         WARN_ON(!acrtc_state->stream);
3882
3883         addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3884         addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3885         addr.flip_immediate = async_flip;
3886
3887
3888         if (acrtc->base.state->event)
3889                 prepare_flip_isr(acrtc);
3890
3891         surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3892         surface_updates->flip_addr = &addr;
3893
3894
3895         dc_commit_updates_for_stream(adev->dm.dc,
3896                                              surface_updates,
3897                                              1,
3898                                              acrtc_state->stream,
3899                                              NULL,
3900                                              &surface_updates->surface,
3901                                              state);
3902
3903         DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3904                          __func__,
3905                          addr.address.grph.addr.high_part,
3906                          addr.address.grph.addr.low_part);
3907
3908
3909         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3910 }
3911
/* Commit all plane updates of an atomic state that target @pcrtc:
 * cursor planes are handled directly, page flips go through
 * amdgpu_dm_do_flip(), and full (modeset) plane updates are collected
 * and committed to the stream in one batch.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct drm_device *dev,
                                    struct amdgpu_display_manager *dm,
                                    struct drm_crtc *pcrtc,
                                    bool *wait_for_vblank)
{
        uint32_t i;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct dc_stream_state *dc_stream_attach;
        struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
        struct drm_crtc_state *new_pcrtc_state =
                        drm_atomic_get_new_crtc_state(state, pcrtc);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
        int planes_count = 0;
        unsigned long flags;

        /* update planes when needed */
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                struct drm_crtc *crtc = new_plane_state->crtc;
                struct drm_crtc_state *new_crtc_state;
                struct drm_framebuffer *fb = new_plane_state->fb;
                bool pflip_needed;
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

                /* Cursor planes bypass the flip/commit machinery. */
                if (plane->type == DRM_PLANE_TYPE_CURSOR) {
                        handle_cursor_update(plane, old_plane_state);
                        continue;
                }

                /* Only process planes attached to @pcrtc. */
                if (!fb || !crtc || pcrtc != crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
                if (!new_crtc_state->active)
                        continue;

                /* No modeset allowed => this is a plain page flip. */
                pflip_needed = !state->allow_modeset;

                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
                        DRM_ERROR("%s: acrtc %d, already busy\n",
                                  __func__,
                                  acrtc_attach->crtc_id);
                        /* In commit tail framework this cannot happen */
                        WARN_ON(1);
                }
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

                if (!pflip_needed) {
                        /* Full update: batch the DC plane state for a single
                         * dc_commit_planes_to_stream() call below.
                         */
                        WARN_ON(!dm_new_plane_state->dc_state);

                        plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

                        dc_stream_attach = acrtc_state->stream;
                        planes_count++;

                } else if (new_crtc_state->planes_changed) {
                        /* Assume even ONE crtc with immediate flip means
                         * entire can't wait for VBLANK
                         * TODO Check if it's correct
                         */
                        *wait_for_vblank =
                                        new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
                                false : true;

                        /* TODO: Needs rework for multiplane flip */
                        if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                                drm_crtc_vblank_get(crtc);

                        amdgpu_dm_do_flip(
                                crtc,
                                fb,
                                drm_crtc_vblank_count(crtc) + *wait_for_vblank,
                                dm_state->context);
                }

        }

        if (planes_count) {
                unsigned long flags;

                /* Arm the pageflip event before committing the batch. */
                if (new_pcrtc_state->event) {

                        drm_crtc_vblank_get(pcrtc);

                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        prepare_flip_isr(acrtc_attach);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                if (false == dc_commit_planes_to_stream(dm->dc,
                                                        plane_states_constructed,
                                                        planes_count,
                                                        dc_stream_attach,
                                                        dm_state->context))
                        dm_error("%s: Failed to attach plane!\n", __func__);
        } else {
                /*TODO BUG Here should go disable planes on CRTC. */
        }
}
4015
4016 /**
4017  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4018  * @crtc_state: the DRM CRTC state
4019  * @stream_state: the DC stream state.
4020  *
4021  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4022  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4023  */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        /* mode_changed is currently the only transient flag mirrored to DC. */
        stream_state->mode_changed = crtc_state->mode_changed;
}
4029
4030 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4031                                    struct drm_atomic_state *state,
4032                                    bool nonblock)
4033 {
4034         struct drm_crtc *crtc;
4035         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4036         struct amdgpu_device *adev = dev->dev_private;
4037         int i;
4038
4039         /*
4040          * We evade vblanks and pflips on crtc that
4041          * should be changed. We do it here to flush & disable
4042          * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4043          * it will update crtc->dm_crtc_state->stream pointer which is used in
4044          * the ISRs.
4045          */
4046         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4047                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4048                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4049
4050                 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4051                         manage_dm_interrupts(adev, acrtc, false);
4052         }
4053         /* Add check here for SoC's that support hardware cursor plane, to
4054          * unset legacy_cursor_update */
4055
4056         return drm_atomic_helper_commit(dev, state, nonblock);
4057
4058         /*TODO Handle EINTR, reenable IRQ*/
4059 }
4060
4061 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4062 {
4063         struct drm_device *dev = state->dev;
4064         struct amdgpu_device *adev = dev->dev_private;
4065         struct amdgpu_display_manager *dm = &adev->dm;
4066         struct dm_atomic_state *dm_state;
4067         uint32_t i, j;
4068         struct drm_crtc *crtc;
4069         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4070         unsigned long flags;
4071         bool wait_for_vblank = true;
4072         struct drm_connector *connector;
4073         struct drm_connector_state *old_con_state, *new_con_state;
4074         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4075
4076         drm_atomic_helper_update_legacy_modeset_state(dev, state);
4077
4078         dm_state = to_dm_atomic_state(state);
4079
4080         /* update changed items */
4081         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4082                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4083
4084                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4085                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4086
4087                 DRM_DEBUG_DRIVER(
4088                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4089                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4090                         "connectors_changed:%d\n",
4091                         acrtc->crtc_id,
4092                         new_crtc_state->enable,
4093                         new_crtc_state->active,
4094                         new_crtc_state->planes_changed,
4095                         new_crtc_state->mode_changed,
4096                         new_crtc_state->active_changed,
4097                         new_crtc_state->connectors_changed);
4098
4099                 /* Copy all transient state flags into dc state */
4100                 if (dm_new_crtc_state->stream) {
4101                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
4102                                                             dm_new_crtc_state->stream);
4103                 }
4104
4105                 /* handles headless hotplug case, updating new_state and
4106                  * aconnector as needed
4107                  */
4108
4109                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4110
4111                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4112
4113                         if (!dm_new_crtc_state->stream) {
4114                                 /*
4115                                  * this could happen because of issues with
4116                                  * userspace notifications delivery.
4117                                  * In this case userspace tries to set mode on
4118                                  * display which is disconnect in fact.
4119                                  * dc_sink in NULL in this case on aconnector.
4120                                  * We expect reset mode will come soon.
4121                                  *
4122                                  * This can also happen when unplug is done
4123                                  * during resume sequence ended
4124                                  *
4125                                  * In this case, we want to pretend we still
4126                                  * have a sink to keep the pipe running so that
4127                                  * hw state is consistent with the sw state
4128                                  */
4129                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4130                                                 __func__, acrtc->base.base.id);
4131                                 continue;
4132                         }
4133
4134                         if (dm_old_crtc_state->stream)
4135                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4136
4137                         acrtc->enabled = true;
4138                         acrtc->hw_mode = new_crtc_state->mode;
4139                         crtc->hwmode = new_crtc_state->mode;
4140                 } else if (modereset_required(new_crtc_state)) {
4141                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4142
4143                         /* i.e. reset mode */
4144                         if (dm_old_crtc_state->stream)
4145                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4146                 }
4147         } /* for_each_crtc_in_state() */
4148
4149         /*
4150          * Add streams after required streams from new and replaced streams
4151          * are removed from freesync module
4152          */
4153         if (adev->dm.freesync_module) {
4154                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4155                                               new_crtc_state, i) {
4156                         struct amdgpu_dm_connector *aconnector = NULL;
4157                         struct dm_connector_state *dm_new_con_state = NULL;
4158                         struct amdgpu_crtc *acrtc = NULL;
4159                         bool modeset_needed;
4160
4161                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4162                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4163                         modeset_needed = modeset_required(
4164                                         new_crtc_state,
4165                                         dm_new_crtc_state->stream,
4166                                         dm_old_crtc_state->stream);
4167                         /* We add stream to freesync if:
4168                          * 1. Said stream is not null, and
4169                          * 2. A modeset is requested. This means that the
4170                          *    stream was removed previously, and needs to be
4171                          *    replaced.
4172                          */
4173                         if (dm_new_crtc_state->stream == NULL ||
4174                                         !modeset_needed)
4175                                 continue;
4176
4177                         acrtc = to_amdgpu_crtc(crtc);
4178
4179                         aconnector =
4180                                 amdgpu_dm_find_first_crtc_matching_connector(
4181                                         state, crtc);
4182                         if (!aconnector) {
4183                                 DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4184                                                  "find connector for acrtc "
4185                                                  "id:%d skipping freesync "
4186                                                  "init\n",
4187                                                  acrtc->crtc_id);
4188                                 continue;
4189                         }
4190
4191                         mod_freesync_add_stream(adev->dm.freesync_module,
4192                                                 dm_new_crtc_state->stream,
4193                                                 &aconnector->caps);
4194                         new_con_state = drm_atomic_get_new_connector_state(
4195                                         state, &aconnector->base);
4196                         dm_new_con_state = to_dm_connector_state(new_con_state);
4197
4198                         mod_freesync_set_user_enable(adev->dm.freesync_module,
4199                                                      &dm_new_crtc_state->stream,
4200                                                      1,
4201                                                      &dm_new_con_state->user_enable);
4202                 }
4203         }
4204
4205         if (dm_state->context) {
4206                 dm_enable_per_frame_crtc_master_sync(dm_state->context);
4207                 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4208         }
4209
4210         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4211                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4212
4213                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4214
4215                 if (dm_new_crtc_state->stream != NULL) {
4216                         const struct dc_stream_status *status =
4217                                         dc_stream_get_status(dm_new_crtc_state->stream);
4218
4219                         if (!status)
4220                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4221                         else
4222                                 acrtc->otg_inst = status->primary_otg_inst;
4223                 }
4224         }
4225
4226         /* Handle scaling and underscan changes*/
4227         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4228                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4229                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4230                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4231                 struct dc_stream_status *status = NULL;
4232
4233                 if (acrtc)
4234                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4235
4236                 /* Skip any modesets/resets */
4237                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4238                         continue;
4239
4240                 /* Skip any thing not scale or underscan changes */
4241                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4242                         continue;
4243
4244                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4245
4246                 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4247                                 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4248
4249                 if (!dm_new_crtc_state->stream)
4250                         continue;
4251
4252                 status = dc_stream_get_status(dm_new_crtc_state->stream);
4253                 WARN_ON(!status);
4254                 WARN_ON(!status->plane_count);
4255
4256                 /*TODO How it works with MPO ?*/
4257                 if (!dc_commit_planes_to_stream(
4258                                 dm->dc,
4259                                 status->plane_states,
4260                                 status->plane_count,
4261                                 dm_new_crtc_state->stream,
4262                                 dm_state->context))
4263                         dm_error("%s: Failed to update stream scaling!\n", __func__);
4264         }
4265
4266         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4267                         new_crtc_state, i) {
4268                 /*
4269                  * loop to enable interrupts on newly arrived crtc
4270                  */
4271                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4272                 bool modeset_needed;
4273
4274                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4275                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4276                 modeset_needed = modeset_required(
4277                                 new_crtc_state,
4278                                 dm_new_crtc_state->stream,
4279                                 dm_old_crtc_state->stream);
4280
4281                 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4282                         continue;
4283
4284                 if (adev->dm.freesync_module)
4285                         mod_freesync_notify_mode_change(
4286                                 adev->dm.freesync_module,
4287                                 &dm_new_crtc_state->stream, 1);
4288
4289                 manage_dm_interrupts(adev, acrtc, true);
4290         }
4291
4292         /* update planes when needed per crtc*/
4293         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4294                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4295
4296                 if (dm_new_crtc_state->stream)
4297                         amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4298         }
4299
4300
4301         /*
4302          * send vblank event on all events not handled in flip and
4303          * mark consumed event for drm_atomic_helper_commit_hw_done
4304          */
4305         spin_lock_irqsave(&adev->ddev->event_lock, flags);
4306         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4307
4308                 if (new_crtc_state->event)
4309                         drm_send_event_locked(dev, &new_crtc_state->event->base);
4310
4311                 new_crtc_state->event = NULL;
4312         }
4313         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4314
4315         /* Signal HW programming completion */
4316         drm_atomic_helper_commit_hw_done(state);
4317
4318         if (wait_for_vblank)
4319                 drm_atomic_helper_wait_for_flip_done(dev, state);
4320
4321         drm_atomic_helper_cleanup_planes(dev, state);
4322 }
4323
4324
4325 static int dm_force_atomic_commit(struct drm_connector *connector)
4326 {
4327         int ret = 0;
4328         struct drm_device *ddev = connector->dev;
4329         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4330         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4331         struct drm_plane *plane = disconnected_acrtc->base.primary;
4332         struct drm_connector_state *conn_state;
4333         struct drm_crtc_state *crtc_state;
4334         struct drm_plane_state *plane_state;
4335
4336         if (!state)
4337                 return -ENOMEM;
4338
4339         state->acquire_ctx = ddev->mode_config.acquire_ctx;
4340
4341         /* Construct an atomic state to restore previous display setting */
4342
4343         /*
4344          * Attach connectors to drm_atomic_state
4345          */
4346         conn_state = drm_atomic_get_connector_state(state, connector);
4347
4348         ret = PTR_ERR_OR_ZERO(conn_state);
4349         if (ret)
4350                 goto err;
4351
4352         /* Attach crtc to drm_atomic_state*/
4353         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4354
4355         ret = PTR_ERR_OR_ZERO(crtc_state);
4356         if (ret)
4357                 goto err;
4358
4359         /* force a restore */
4360         crtc_state->mode_changed = true;
4361
4362         /* Attach plane to drm_atomic_state */
4363         plane_state = drm_atomic_get_plane_state(state, plane);
4364
4365         ret = PTR_ERR_OR_ZERO(plane_state);
4366         if (ret)
4367                 goto err;
4368
4369
4370         /* Call commit internally with the state we just constructed */
4371         ret = drm_atomic_commit(state);
4372         if (!ret)
4373                 return 0;
4374
4375 err:
4376         DRM_ERROR("Restoring old state failed with %i\n", ret);
4377         drm_atomic_state_put(state);
4378
4379         return ret;
4380 }
4381
4382 /*
4383  * This functions handle all cases when set mode does not come upon hotplug.
4384  * This include when the same display is unplugged then plugged back into the
4385  * same port and when we are running without usermode desktop manager supprot
4386  */
4387 void dm_restore_drm_connector_state(struct drm_device *dev,
4388                                     struct drm_connector *connector)
4389 {
4390         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4391         struct amdgpu_crtc *disconnected_acrtc;
4392         struct dm_crtc_state *acrtc_state;
4393
4394         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4395                 return;
4396
4397         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4398         if (!disconnected_acrtc)
4399                 return;
4400
4401         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4402         if (!acrtc_state->stream)
4403                 return;
4404
4405         /*
4406          * If the previous sink is not released and different from the current,
4407          * we deduce we are in a state where we can not rely on usermode call
4408          * to turn on the display, so we do it here
4409          */
4410         if (acrtc_state->stream->sink != aconnector->dc_sink)
4411                 dm_force_atomic_commit(&aconnector->base);
4412 }
4413
4414 /*`
4415  * Grabs all modesetting locks to serialize against any blocking commits,
4416  * Waits for completion of all non blocking commits.
4417  */
4418 static int do_aquire_global_lock(struct drm_device *dev,
4419                                  struct drm_atomic_state *state)
4420 {
4421         struct drm_crtc *crtc;
4422         struct drm_crtc_commit *commit;
4423         long ret;
4424
4425         /* Adding all modeset locks to aquire_ctx will
4426          * ensure that when the framework release it the
4427          * extra locks we are locking here will get released to
4428          */
4429         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4430         if (ret)
4431                 return ret;
4432
4433         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4434                 spin_lock(&crtc->commit_lock);
4435                 commit = list_first_entry_or_null(&crtc->commit_list,
4436                                 struct drm_crtc_commit, commit_entry);
4437                 if (commit)
4438                         drm_crtc_commit_get(commit);
4439                 spin_unlock(&crtc->commit_lock);
4440
4441                 if (!commit)
4442                         continue;
4443
4444                 /* Make sure all pending HW programming completed and
4445                  * page flips done
4446                  */
4447                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4448
4449                 if (ret > 0)
4450                         ret = wait_for_completion_interruptible_timeout(
4451                                         &commit->flip_done, 10*HZ);
4452
4453                 if (ret == 0)
4454                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4455                                   "timed out\n", crtc->base.id, crtc->name);
4456
4457                 drm_crtc_commit_put(commit);
4458         }
4459
4460         return ret < 0 ? ret : 0;
4461 }
4462
/*
 * dm_update_crtcs_state() - sync DC streams with the new DRM CRTC states.
 *
 * Called from atomic check, once with @enable == false to remove streams
 * for changed/disabled CRTCs and once with @enable == true to add streams
 * for updated/enabled CRTCs. *@lock_and_validation_needed is set whenever
 * a stream was actually added or removed, telling the caller that global
 * locking and full DC validation are required.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int dm_update_crtcs_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = NULL;
                struct amdgpu_dm_connector *aconnector = NULL;
                struct drm_connector_state *new_con_state = NULL;
                struct dm_connector_state *dm_conn_state = NULL;

                new_stream = NULL;

                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                acrtc = to_amdgpu_crtc(crtc);

                aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

                /* TODO This hack should go away */
                if (aconnector && enable) {
                        // Make sure fake sink is created in plug-in scenario
                        new_con_state = drm_atomic_get_connector_state(state,
                                                                    &aconnector->base);

                        if (IS_ERR(new_con_state)) {
                                ret = PTR_ERR_OR_ZERO(new_con_state);
                                break;
                        }

                        dm_conn_state = to_dm_connector_state(new_con_state);

                        /* create_stream_for_sink() hands back a reference;
                         * it is released at next_crtc below. If the stream
                         * is adopted into dm_new_crtc_state, an extra
                         * reference is taken via dc_stream_retain() first. */
                        new_stream = create_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                            dm_conn_state);

                        /*
                         * we can have no stream on ACTION_SET if a display
                         * was disconnected during S3; in this case it is not
                         * an error: the OS will be updated after detection
                         * and do the right thing on the next atomic commit
                         */

                        if (!new_stream) {
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                /* NOTE(review): break leaves the loop with
                                 * ret == 0, skipping remaining CRTCs —
                                 * presumably intentional per the comment
                                 * above; confirm. */
                                break;
                        }

                        if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                            dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                                /* Identical timing and scaling: downgrade the
                                 * modeset to a fast update. */
                                new_crtc_state->mode_changed = false;
                                DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                                 new_crtc_state->mode_changed);
                        }
                }

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto next_crtc;

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Remove stream for any changed/disabled CRTC */
                if (!enable) {

                        if (!dm_old_crtc_state->stream)
                                goto next_crtc;

                        DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                        crtc->base.id);

                        /* i.e. reset mode */
                        if (dc_remove_stream_from_ctx(
                                        dc,
                                        dm_state->context,
                                        dm_old_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        /* Drop the state's reference on the outgoing stream. */
                        dc_stream_release(dm_old_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;

                        *lock_and_validation_needed = true;

                } else {/* Add stream for any updated/enabled CRTC */
                        /*
                         * Quick fix to prevent NULL pointer on new_stream when
                         * added MST connectors not found in existing crtc_state in the chained mode
                         * TODO: need to dig out the root cause of that
                         */
                        if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                                goto next_crtc;

                        if (modereset_required(new_crtc_state))
                                goto next_crtc;

                        if (modeset_required(new_crtc_state, new_stream,
                                             dm_old_crtc_state->stream)) {

                                WARN_ON(dm_new_crtc_state->stream);

                                /* Adopt the new stream; retain so the release
                                 * at next_crtc does not free it. */
                                dm_new_crtc_state->stream = new_stream;

                                dc_stream_retain(new_stream);

                                DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                        crtc->base.id);

                                if (dc_add_stream_to_ctx(
                                                dc,
                                                dm_state->context,
                                                dm_new_crtc_state->stream) != DC_OK) {
                                        ret = -EINVAL;
                                        goto fail;
                                }

                                *lock_and_validation_needed = true;
                        }
                }

next_crtc:
                /* Release extra reference */
                if (new_stream)
                         dc_stream_release(new_stream);
        }

        return ret;

fail:
        /* Drop the reference from create_stream_for_sink() on error paths. */
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
4617
4618 static int dm_update_planes_state(struct dc *dc,
4619                                   struct drm_atomic_state *state,
4620                                   bool enable,
4621                                   bool *lock_and_validation_needed)
4622 {
4623         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4624         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4625         struct drm_plane *plane;
4626         struct drm_plane_state *old_plane_state, *new_plane_state;
4627         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4628         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4629         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4630         int i ;
4631         /* TODO return page_flip_needed() function */
4632         bool pflip_needed  = !state->allow_modeset;
4633         int ret = 0;
4634
4635         if (pflip_needed)
4636                 return ret;
4637
4638         /* Add new planes */
4639         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4640                 new_plane_crtc = new_plane_state->crtc;
4641                 old_plane_crtc = old_plane_state->crtc;
4642                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4643                 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4644
4645                 /*TODO Implement atomic check for cursor plane */
4646                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4647                         continue;
4648
4649                 /* Remove any changed/removed planes */
4650                 if (!enable) {
4651
4652                         if (!old_plane_crtc)
4653                                 continue;
4654
4655                         old_crtc_state = drm_atomic_get_old_crtc_state(
4656                                         state, old_plane_crtc);
4657                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4658
4659                         if (!dm_old_crtc_state->stream)
4660                                 continue;
4661
4662                         DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4663                                         plane->base.id, old_plane_crtc->base.id);
4664
4665                         if (!dc_remove_plane_from_context(
4666                                         dc,
4667                                         dm_old_crtc_state->stream,
4668                                         dm_old_plane_state->dc_state,
4669                                         dm_state->context)) {
4670
4671                                 ret = EINVAL;
4672                                 return ret;
4673                         }
4674
4675
4676                         dc_plane_state_release(dm_old_plane_state->dc_state);
4677                         dm_new_plane_state->dc_state = NULL;
4678
4679                         *lock_and_validation_needed = true;
4680
4681                 } else { /* Add new planes */
4682
4683                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4684                                 continue;
4685
4686                         if (!new_plane_crtc)
4687                                 continue;
4688
4689                         new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4690                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4691
4692                         if (!dm_new_crtc_state->stream)
4693                                 continue;
4694
4695
4696                         WARN_ON(dm_new_plane_state->dc_state);
4697
4698                         dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4699
4700                         DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4701                                         plane->base.id, new_plane_crtc->base.id);
4702
4703                         if (!dm_new_plane_state->dc_state) {
4704                                 ret = -EINVAL;
4705                                 return ret;
4706                         }
4707
4708                         ret = fill_plane_attributes(
4709                                 new_plane_crtc->dev->dev_private,
4710                                 dm_new_plane_state->dc_state,
4711                                 new_plane_state,
4712                                 new_crtc_state);
4713                         if (ret)
4714                                 return ret;
4715
4716
4717                         if (!dc_add_plane_to_context(
4718                                         dc,
4719                                         dm_new_crtc_state->stream,
4720                                         dm_new_plane_state->dc_state,
4721                                         dm_state->context)) {
4722
4723                                 ret = -EINVAL;
4724                                 return ret;
4725                         }
4726
4727                         /* Tell DC to do a full surface update every time there
4728                          * is a plane change. Inefficient, but works for now.
4729                          */
4730                         dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
4731
4732                         *lock_and_validation_needed = true;
4733                 }
4734         }
4735
4736
4737         return ret;
4738 }
4739
4740 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4741                                   struct drm_atomic_state *state)
4742 {
4743         struct amdgpu_device *adev = dev->dev_private;
4744         struct dc *dc = adev->dm.dc;
4745         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4746         struct drm_connector *connector;
4747         struct drm_connector_state *old_con_state, *new_con_state;
4748         struct drm_crtc *crtc;
4749         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4750         int ret, i;
4751
4752         /*
4753          * This bool will be set for true for any modeset/reset
4754          * or plane update which implies non fast surface update.
4755          */
4756         bool lock_and_validation_needed = false;
4757
4758         ret = drm_atomic_helper_check_modeset(dev, state);
4759         if (ret)
4760                 goto fail;
4761
4762         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4763                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4764                     !new_crtc_state->color_mgmt_changed)
4765                         continue;
4766
4767                 if (!new_crtc_state->enable)
4768                         continue;
4769
4770                 ret = drm_atomic_add_affected_connectors(state, crtc);
4771                 if (ret)
4772                         return ret;
4773
4774                 ret = drm_atomic_add_affected_planes(state, crtc);
4775                 if (ret)
4776                         goto fail;
4777         }
4778
4779         dm_state->context = dc_create_state();
4780         ASSERT(dm_state->context);
4781         dc_resource_state_copy_construct_current(dc, dm_state->context);
4782
4783         /* Remove exiting planes if they are modified */
4784         ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4785         if (ret) {
4786                 goto fail;
4787         }
4788
4789         /* Disable all crtcs which require disable */
4790         ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4791         if (ret) {
4792                 goto fail;
4793         }
4794
4795         /* Enable all crtcs which require enable */
4796         ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4797         if (ret) {
4798                 goto fail;
4799         }
4800
4801         /* Add new/modified planes */
4802         ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4803         if (ret) {
4804                 goto fail;
4805         }
4806
4807         /* Run this here since we want to validate the streams we created */
4808         ret = drm_atomic_helper_check_planes(dev, state);
4809         if (ret)
4810                 goto fail;
4811
4812         /* Check scaling and underscan changes*/
4813         /*TODO Removed scaling changes validation due to inability to commit
4814          * new stream into context w\o causing full reset. Need to
4815          * decide how to handle.
4816          */
4817         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4818                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4819                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4820                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4821
4822                 /* Skip any modesets/resets */
4823                 if (!acrtc || drm_atomic_crtc_needs_modeset(
4824                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4825                         continue;
4826
4827                 /* Skip any thing not scale or underscan changes */
4828                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4829                         continue;
4830
4831                 lock_and_validation_needed = true;
4832         }
4833
4834         /*
4835          * For full updates case when
4836          * removing/adding/updating  streams on once CRTC while flipping
4837          * on another CRTC,
4838          * acquiring global lock  will guarantee that any such full
4839          * update commit
4840          * will wait for completion of any outstanding flip using DRMs
4841          * synchronization events.
4842          */
4843
4844         if (lock_and_validation_needed) {
4845
4846                 ret = do_aquire_global_lock(dev, state);
4847                 if (ret)
4848                         goto fail;
4849
4850                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4851                         ret = -EINVAL;
4852                         goto fail;
4853                 }
4854         }
4855
4856         /* Must be success */
4857         WARN_ON(ret);
4858         return ret;
4859
4860 fail:
4861         if (ret == -EDEADLK)
4862                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
4863         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4864                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4865         else
4866                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4867
4868         return ret;
4869 }
4870
4871 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4872                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
4873 {
4874         uint8_t dpcd_data;
4875         bool capable = false;
4876
4877         if (amdgpu_dm_connector->dc_link &&
4878                 dm_helpers_dp_read_dpcd(
4879                                 NULL,
4880                                 amdgpu_dm_connector->dc_link,
4881                                 DP_DOWN_STREAM_PORT_COUNT,
4882                                 &dpcd_data,
4883                                 sizeof(dpcd_data))) {
4884                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4885         }
4886
4887         return capable;
4888 }
4889 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4890                                            struct edid *edid)
4891 {
4892         int i;
4893         uint64_t val_capable;
4894         bool edid_check_required;
4895         struct detailed_timing *timing;
4896         struct detailed_non_pixel *data;
4897         struct detailed_data_monitor_range *range;
4898         struct amdgpu_dm_connector *amdgpu_dm_connector =
4899                         to_amdgpu_dm_connector(connector);
4900
4901         struct drm_device *dev = connector->dev;
4902         struct amdgpu_device *adev = dev->dev_private;
4903
4904         edid_check_required = false;
4905         if (!amdgpu_dm_connector->dc_sink) {
4906                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4907                 return;
4908         }
4909         if (!adev->dm.freesync_module)
4910                 return;
4911         /*
4912          * if edid non zero restrict freesync only for dp and edp
4913          */
4914         if (edid) {
4915                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4916                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4917                         edid_check_required = is_dp_capable_without_timing_msa(
4918                                                 adev->dm.dc,
4919                                                 amdgpu_dm_connector);
4920                 }
4921         }
4922         val_capable = 0;
4923         if (edid_check_required == true && (edid->version > 1 ||
4924            (edid->version == 1 && edid->revision > 1))) {
4925                 for (i = 0; i < 4; i++) {
4926
4927                         timing  = &edid->detailed_timings[i];
4928                         data    = &timing->data.other_data;
4929                         range   = &data->data.range;
4930                         /*
4931                          * Check if monitor has continuous frequency mode
4932                          */
4933                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
4934                                 continue;
4935                         /*
4936                          * Check for flag range limits only. If flag == 1 then
4937                          * no additional timing information provided.
4938                          * Default GTF, GTF Secondary curve and CVT are not
4939                          * supported
4940                          */
4941                         if (range->flags != 1)
4942                                 continue;
4943
4944                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4945                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4946                         amdgpu_dm_connector->pixel_clock_mhz =
4947                                 range->pixel_clock_mhz * 10;
4948                         break;
4949                 }
4950
4951                 if (amdgpu_dm_connector->max_vfreq -
4952                                 amdgpu_dm_connector->min_vfreq > 10) {
4953                         amdgpu_dm_connector->caps.supported = true;
4954                         amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4955                                         amdgpu_dm_connector->min_vfreq * 1000000;
4956                         amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4957                                         amdgpu_dm_connector->max_vfreq * 1000000;
4958                                 val_capable = 1;
4959                 }
4960         }
4961
4962         /*
4963          * TODO figure out how to notify user-mode or DRM of freesync caps
4964          * once we figure out how to deal with freesync in an upstreamable
4965          * fashion
4966          */
4967
4968 }
4969
/*
 * Intentional no-op counterpart to amdgpu_dm_add_sink_to_freesync_module.
 *
 * TODO fill in once we figure out how to deal with freesync in
 * an upstreamable fashion
 */
void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
{
}
This page took 0.335606 seconds and 4 git commands to generate.