]> Git Repo - linux.git/blob - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drm/i915: Update DRIVER_DATE to 20180221
[linux.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "dcn/dcn_1_0_offset.h"
63 #include "dcn/dcn_1_0_sh_mask.h"
64 #include "soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
69 #include "modules/inc/mod_freesync.h"
70
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
79  * drm_encoder, drm_mode_config
80  *
81  * Returns 0 on success
82  */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91                                 struct amdgpu_plane *aplane,
92                                 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94                                struct drm_plane *plane,
95                                uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
98                                     uint32_t link_index,
99                                     struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101                                   struct amdgpu_encoder *aencoder,
102                                   uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107                                    struct drm_atomic_state *state,
108                                    bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113                                   struct drm_atomic_state *state);
114
115
116
117
/* Default per-index plane types: every hardware plane acts as a primary. */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
};
126
/* Carrizo: last plane is a YUV-capable underlay exposed as an overlay.
 * NOTE(review): "carizzo" misspells Carrizo; renaming would require touching
 * every user of this table, so the identifier is kept as-is.
 */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
133
/* Stoney: last plane is a YUV-capable underlay exposed as an overlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
139
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155         if (crtc >= adev->mode_info.num_crtc)
156                 return 0;
157         else {
158                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160                                 acrtc->base.state);
161
162
163                 if (acrtc_state->stream == NULL) {
164                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165                                   crtc);
166                         return 0;
167                 }
168
169                 return dc_stream_get_vblank_counter(acrtc_state->stream);
170         }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174                                   u32 *vbl, u32 *position)
175 {
176         uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179                 return -EINVAL;
180         else {
181                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183                                                 acrtc->base.state);
184
185                 if (acrtc_state->stream ==  NULL) {
186                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187                                   crtc);
188                         return 0;
189                 }
190
191                 /*
192                  * TODO rework base driver to use values directly.
193                  * for now parse it back into reg-format
194                  */
195                 dc_stream_get_scanoutpos(acrtc_state->stream,
196                                          &v_blank_start,
197                                          &v_blank_end,
198                                          &h_position,
199                                          &v_position);
200
201                 *position = v_position | (h_position << 16);
202                 *vbl = v_blank_start | (v_blank_end << 16);
203         }
204
205         return 0;
206 }
207
/* amd_ip_funcs.is_idle stub: DM does not yet track an idle state. */
static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}
213
/* amd_ip_funcs.wait_for_idle stub: nothing to wait for yet. */
static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}
219
/* amd_ip_funcs.check_soft_reset stub: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
        return false;
}
224
/* amd_ip_funcs.soft_reset stub: no DM-level soft reset implemented. */
static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}
230
231 static struct amdgpu_crtc *
232 get_crtc_by_otg_inst(struct amdgpu_device *adev,
233                      int otg_inst)
234 {
235         struct drm_device *dev = adev->ddev;
236         struct drm_crtc *crtc;
237         struct amdgpu_crtc *amdgpu_crtc;
238
239         /*
240          * following if is check inherited from both functions where this one is
241          * used now. Need to be checked why it could happen.
242          */
243         if (otg_inst == -1) {
244                 WARN_ON(1);
245                 return adev->mode_info.crtcs[0];
246         }
247
248         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
249                 amdgpu_crtc = to_amdgpu_crtc(crtc);
250
251                 if (amdgpu_crtc->otg_inst == otg_inst)
252                         return amdgpu_crtc;
253         }
254
255         return NULL;
256 }
257
/*
 * Page-flip interrupt handler: completes a submitted flip by sending the
 * pending vblank event to userspace under the drm event_lock, then drops
 * the vblank reference taken when the flip was queued.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        /* Spurious interrupt: no flip was submitted for this CRTC. */
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }


        /* wake up userspace */
        if (amdgpu_crtc->event) {
                /* Update to correct count/ts if racing with vblank irq */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

                /* page flip completed. clean up */
                amdgpu_crtc->event = NULL;

        } else
                WARN_ON(1);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
                                        __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

        /* Balances the drm_crtc_vblank_get() taken when the flip was queued. */
        drm_crtc_vblank_put(&amdgpu_crtc->base);
}
308
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311         struct common_irq_params *irq_params = interrupt_params;
312         struct amdgpu_device *adev = irq_params->adev;
313         uint8_t crtc_index = 0;
314         struct amdgpu_crtc *acrtc;
315
316         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318         if (acrtc)
319                 crtc_index = acrtc->crtc_id;
320
321         drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
/* amd_ip_funcs.set_clockgating_state stub: no DM-level clockgating control. */
static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}
329
/* amd_ip_funcs.set_powergating_state stub: no DM-level powergating control. */
static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
339 static void hotplug_notify_work_func(struct work_struct *work)
340 {
341         struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
342         struct drm_device *dev = dm->ddev;
343
344         drm_kms_helper_hotplug_event(dev);
345 }
346
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data  */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
352
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355         int r;
356         struct dm_comressor_info *compressor = &adev->dm.compressor;
357
358         if (!compressor->bo_ptr) {
359                 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360                                 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361                                 &compressor->gpu_addr, &compressor->cpu_addr);
362
363                 if (r)
364                         DRM_ERROR("DM: Failed to initialize fbc\n");
365         }
366
367 }
368 #endif
369
370
371 /* Init display KMS
372  *
373  * Returns 0 on success
374  */
375 static int amdgpu_dm_init(struct amdgpu_device *adev)
376 {
377         struct dc_init_data init_data;
378         adev->dm.ddev = adev->ddev;
379         adev->dm.adev = adev;
380
381         /* Zero all the fields */
382         memset(&init_data, 0, sizeof(init_data));
383
384         /* initialize DAL's lock (for SYNC context use) */
385         spin_lock_init(&adev->dm.dal_lock);
386
387         /* initialize DAL's mutex */
388         mutex_init(&adev->dm.dal_mutex);
389
390         if(amdgpu_dm_irq_init(adev)) {
391                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
392                 goto error;
393         }
394
395         init_data.asic_id.chip_family = adev->family;
396
397         init_data.asic_id.pci_revision_id = adev->rev_id;
398         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
399
400         init_data.asic_id.vram_width = adev->mc.vram_width;
401         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
402         init_data.asic_id.atombios_base_address =
403                 adev->mode_info.atom_context->bios;
404
405         init_data.driver = adev;
406
407         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
408
409         if (!adev->dm.cgs_device) {
410                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
411                 goto error;
412         }
413
414         init_data.cgs_device = adev->dm.cgs_device;
415
416         adev->dm.dal = NULL;
417
418         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
419
420         if (amdgpu_dc_log)
421                 init_data.log_mask = DC_DEFAULT_LOG_MASK;
422         else
423                 init_data.log_mask = DC_MIN_LOG_MASK;
424
425 #if defined(CONFIG_DRM_AMD_DC_FBC)
426         if (adev->family == FAMILY_CZ)
427                 amdgpu_dm_initialize_fbc(adev);
428         init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
429 #endif
430         /* Display Core create. */
431         adev->dm.dc = dc_create(&init_data);
432
433         if (adev->dm.dc) {
434                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
435         } else {
436                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
437                 goto error;
438         }
439
440         INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
441
442         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
443         if (!adev->dm.freesync_module) {
444                 DRM_ERROR(
445                 "amdgpu: failed to initialize freesync_module.\n");
446         } else
447                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
448                                 adev->dm.freesync_module);
449
450         if (amdgpu_dm_initialize_drm_device(adev)) {
451                 DRM_ERROR(
452                 "amdgpu: failed to initialize sw for display support.\n");
453                 goto error;
454         }
455
456         /* Update the actual used number of crtc */
457         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
458
459         /* TODO: Add_display_info? */
460
461         /* TODO use dynamic cursor width */
462         adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
463         adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
464
465         if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
466                 DRM_ERROR(
467                 "amdgpu: failed to initialize sw for display support.\n");
468                 goto error;
469         }
470
471         DRM_DEBUG_DRIVER("KMS initialized.\n");
472
473         return 0;
474 error:
475         amdgpu_dm_fini(adev);
476
477         return -1;
478 }
479
480 static void amdgpu_dm_fini(struct amdgpu_device *adev)
481 {
482         amdgpu_dm_destroy_drm_device(&adev->dm);
483         /*
484          * TODO: pageflip, vlank interrupt
485          *
486          * amdgpu_dm_irq_fini(adev);
487          */
488
489         if (adev->dm.cgs_device) {
490                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
491                 adev->dm.cgs_device = NULL;
492         }
493         if (adev->dm.freesync_module) {
494                 mod_freesync_destroy(adev->dm.freesync_module);
495                 adev->dm.freesync_module = NULL;
496         }
497         /* DC Destroy TODO: Replace destroy DAL */
498         if (adev->dm.dc)
499                 dc_destroy(&adev->dm.dc);
500         return;
501 }
502
/* amd_ip_funcs.sw_init stub: DM software setup happens in dm_hw_init(). */
static int dm_sw_init(void *handle)
{
        return 0;
}
507
/* amd_ip_funcs.sw_fini stub: DM software teardown happens in dm_hw_fini(). */
static int dm_sw_fini(void *handle)
{
        return 0;
}
512
513 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
514 {
515         struct amdgpu_dm_connector *aconnector;
516         struct drm_connector *connector;
517         int ret = 0;
518
519         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
520
521         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
522                 aconnector = to_amdgpu_dm_connector(connector);
523                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
524                     aconnector->mst_mgr.aux) {
525                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
526                                         aconnector, aconnector->base.base.id);
527
528                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
529                         if (ret < 0) {
530                                 DRM_ERROR("DM_MST: Failed to start MST\n");
531                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
532                                 return ret;
533                                 }
534                         }
535         }
536
537         drm_modeset_unlock(&dev->mode_config.connection_mutex);
538         return ret;
539 }
540
541 static int dm_late_init(void *handle)
542 {
543         struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
544
545         return detect_mst_link_for_all_connectors(dev);
546 }
547
548 static void s3_handle_mst(struct drm_device *dev, bool suspend)
549 {
550         struct amdgpu_dm_connector *aconnector;
551         struct drm_connector *connector;
552
553         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
554
555         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
556                    aconnector = to_amdgpu_dm_connector(connector);
557                    if (aconnector->dc_link->type == dc_connection_mst_branch &&
558                                    !aconnector->mst_port) {
559
560                            if (suspend)
561                                    drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
562                            else
563                                    drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
564                    }
565         }
566
567         drm_modeset_unlock(&dev->mode_config.connection_mutex);
568 }
569
/* amd_ip_funcs.hw_init: create the display manager and enable HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	/*
	 * NOTE(review): amdgpu_dm_init() failure is not propagated here —
	 * confirm whether HPD init should be skipped on error.
	 */
	amdgpu_dm_hpd_init(adev);

	return 0;
}
579
/* amd_ip_funcs.hw_fini: tear down in reverse of dm_hw_init(). */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
590
591 static int dm_suspend(void *handle)
592 {
593         struct amdgpu_device *adev = handle;
594         struct amdgpu_display_manager *dm = &adev->dm;
595         int ret = 0;
596
597         s3_handle_mst(adev->ddev, true);
598
599         amdgpu_dm_irq_suspend(adev);
600
601         WARN_ON(adev->dm.cached_state);
602         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
603
604         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
605
606         return ret;
607 }
608
609 static struct amdgpu_dm_connector *
610 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
611                                              struct drm_crtc *crtc)
612 {
613         uint32_t i;
614         struct drm_connector_state *new_con_state;
615         struct drm_connector *connector;
616         struct drm_crtc *crtc_from_state;
617
618         for_each_new_connector_in_state(state, connector, new_con_state, i) {
619                 crtc_from_state = new_con_state->crtc;
620
621                 if (crtc_from_state == crtc)
622                         return to_amdgpu_dm_connector(connector);
623         }
624
625         return NULL;
626 }
627
628 static int dm_resume(void *handle)
629 {
630         struct amdgpu_device *adev = handle;
631         struct amdgpu_display_manager *dm = &adev->dm;
632
633         /* power on hardware */
634         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
635
636         return 0;
637 }
638
/*
 * Restore the display state after S3: re-enable MST and HPD, re-detect all
 * links, release the dc states duplicated at suspend time (atomic_check
 * recreates them), then replay the cached atomic state.
 *
 * Returns 0 on success or the error from drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;

        int ret = 0;
        int i;

        /* program HPD filter */
        dc_resume(dm->dc);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* Do detection */
        list_for_each_entry(connector,
                        &ddev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

        adev->dm.cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        return ret;
}
725
/*
 * IP-block callbacks hooking the display manager into the amdgpu IP-block
 * lifecycle (init/fini, suspend/resume, reset, clock/power gating).
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};
743
/* Registration record exposing DM as the DCE IP block, version 1.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};
752
753
754 static struct drm_atomic_state *
755 dm_atomic_state_alloc(struct drm_device *dev)
756 {
757         struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
758
759         if (!state)
760                 return NULL;
761
762         if (drm_atomic_state_init(dev, &state->base) < 0)
763                 goto fail;
764
765         return &state->base;
766
767 fail:
768         kfree(state);
769         return NULL;
770 }
771
772 static void
773 dm_atomic_state_clear(struct drm_atomic_state *state)
774 {
775         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
776
777         if (dm_state->context) {
778                 dc_release_state(dm_state->context);
779                 dm_state->context = NULL;
780         }
781
782         drm_atomic_state_default_clear(state);
783 }
784
/* Free hook: release base-class resources, then the containing allocation. */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
792
/* drm_mode_config callbacks: atomic check/commit plus dm_atomic_state hooks. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = amdgpu_dm_atomic_commit,
        .atomic_state_alloc = dm_atomic_state_alloc,
        .atomic_state_clear = dm_atomic_state_clear,
        .atomic_state_free = dm_atomic_state_alloc_free
};
802
/* Override the default commit tail with DM's implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
806
/*
 * Synchronize the drm connector (sink, EDID property, freesync registration)
 * with the current dc_link sink after a detection pass. Handles forced
 * (emulated-EDID) connectors, MST sinks (ignored), plug and unplug.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state == true)
                return;


        sink = aconnector->dc_link->local_sink;

        /* An EDID-managed (forced) connector gets its first update only in the
         * mode_valid hook, where its sink becomes either the fake or the
         * physical sink depending on link status. Don't do it here during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /* For S3 resume with headless use eml_sink to fake stream
                 * because on resume connector->sink is set to NULL
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_remove_sink_from_freesync_module(
                                                                connector);
                                /* retain and release below are used to
                                 * bump up refcount for sink because the link doesn't point
                                 * to it anymore after disconnect, so on next crtc to connector
                                 * reshuffle by UMD we will get into unwanted dc_sink release
                                 */
                                if (aconnector->dc_sink != aconnector->dc_em_sink)
                                        dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        amdgpu_dm_add_sink_to_freesync_module(
                                                connector, aconnector->edid);
                } else {
                        amdgpu_dm_remove_sink_from_freesync_module(connector);
                        if (!aconnector->dc_sink)
                                aconnector->dc_sink = aconnector->dc_em_sink;
                        else if (aconnector->dc_sink != aconnector->dc_em_sink)
                                dc_sink_retain(aconnector->dc_sink);
                }

                mutex_unlock(&dev->mode_config.mutex);
                return;
        }

        /*
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                return;

        if (aconnector->dc_sink == sink) {
                /* We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!! */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                aconnector->connector_id);
                return;
        }

        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /* 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do */
        if (sink) {
                /* TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here. */
                if (aconnector->dc_sink)
                        amdgpu_dm_remove_sink_from_freesync_module(
                                                        connector);

                aconnector->dc_sink = sink;
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
                } else {
                        aconnector->edid =
                                (struct edid *) sink->dc_edid.raw_edid;


                        drm_mode_connector_update_edid_property(connector,
                                        aconnector->edid);
                }
                amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

        } else {
                /* Unplug: clear EDID property, modes and sink reference. */
                amdgpu_dm_remove_sink_from_freesync_module(connector);
                drm_mode_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                aconnector->dc_sink = NULL;
        }

        mutex_unlock(&dev->mode_config.mutex);
}
911
/*
 * Long-pulse HPD handler, registered per connector via
 * amdgpu_dm_irq_register_interrupt(). @param is the amdgpu_dm_connector
 * the hot-plug line belongs to. Runs link detection and, on a status
 * change, restores the DRM state and notifies userspace.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in it's own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	/* A real HPD means the sink state is authoritative again; drop any
	 * previously faked enable. */
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		/* Re-commit the last known-good state for this connector. */
		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only notify userspace when the connector is not forced
		 * on/off; a forced connector must not flap on HPD. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
940
/*
 * Service a DP short-pulse (HPD_RX) interrupt for an MST-capable link.
 *
 * Reads the sink's IRQ/ESI DPCD registers, forwards them to the MST
 * manager, ACKs handled vectors back to the sink, and re-reads until
 * either no new IRQ is pending or max_process_count iterations elapse
 * (guard against a sink that never deasserts its IRQ bits).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-DP1.2 sinks expose the IRQ vector at the legacy DPCD range;
	 * DP1.2+ sinks use the Event Status Indicator (ESI) range. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	/* Loop while the full vector was read; a short/failed AUX read
	 * terminates the loop. */
	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			/* Skip esi[0] (sink count) - only the IRQ vector
			 * bytes are written back, starting at dpcd_addr+1. */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handle */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1018
/*
 * Short-pulse (HPD_RX) handler, registered per connector. @param is the
 * amdgpu_dm_connector. Handles link-loss/downstream status changes and
 * delegates MST IRQ servicing to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	/* Sampled once before any handling; the MST state below decides
	 * whether the non-MST detect path runs. */
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	/* MST branch links skip the lock: MST handling happens in its own
	 * context and taking hpd_lock here could deadlock with it
	 * (NOTE(review): inferred from the TODO above — confirm). */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* Service MST/ESI IRQ vectors when the link is trained or is an
	 * MST branch. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
1055
/*
 * Walk every connector on the device and register the long-pulse (HPD)
 * and short-pulse (HPD_RX) interrupt handlers for links that expose a
 * valid IRQ source. Called after all connectors have been created.
 */
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			/* The connector itself is the handler context. */
			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
1094
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCE 8/10/11-class (pre-DCN) IRQ setup: hooks VBLANK, page-flip and HPD
 * interrupts into the base driver and registers DM-level handlers.
 * Returns 0 on success or the negative error from amdgpu_irq_add_id().
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	/* Vega10/Raven route display interrupts through the DCE IH client
	 * rather than the legacy one. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC handler context, indexed by DC IRQ source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	/* Pageflip source IDs are spaced two apart on VI, hence i += 2. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1178
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCN 1.0 (Raven) IRQ setup: same structure as dce110_register_irq_handlers()
 * but using DCN source IDs (VSTARTUP instead of VBLANK, HUBP flip sources)
 * and ranges sized by adev->mode_info.num_crtc.
 * Returns 0 on success or the negative error from amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC handler context, indexed by DC IRQ source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
1264
1265 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1266 {
1267         int r;
1268
1269         adev->mode_info.mode_config_initialized = true;
1270
1271         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1272         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1273
1274         adev->ddev->mode_config.max_width = 16384;
1275         adev->ddev->mode_config.max_height = 16384;
1276
1277         adev->ddev->mode_config.preferred_depth = 24;
1278         adev->ddev->mode_config.prefer_shadow = 1;
1279         /* indicate support of immediate flip */
1280         adev->ddev->mode_config.async_page_flip = true;
1281
1282         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1283
1284         r = amdgpu_modeset_create_props(adev);
1285         if (r)
1286                 return r;
1287
1288         return 0;
1289 }
1290
1291 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1292         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1293
1294 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1295 {
1296         struct amdgpu_display_manager *dm = bl_get_data(bd);
1297
1298         if (dc_link_set_backlight_level(dm->backlight_link,
1299                         bd->props.brightness, 0, 0))
1300                 return 0;
1301         else
1302                 return 1;
1303 }
1304
/*
 * backlight_ops.get_brightness callback: report the last requested level
 * (no hardware readback).
 */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}
1309
/* Backlight class operations exposed for the registered backlight device. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status  = amdgpu_dm_backlight_update_status,
};
1314
/*
 * Register an "amdgpu_blN" backlight class device for this DM instance.
 * Registration failure is logged but not fatal: dm->backlight_dev then
 * holds an ERR_PTR (callers are expected to check with IS_ERR —
 * NOTE(review): confirm against the users of backlight_dev).
 */
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
1338
1339 #endif
1340
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	/* One amdgpu_plane per DC plane capability. */
	for (i = 0; i < dm->dc->caps.max_planes; i++) {
		struct amdgpu_plane *plane;

		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
		mode_info->planes[i] = plane;

		if (!plane) {
			DRM_ERROR("KMS: Failed to allocate plane\n");
			goto fail;
		}
		plane->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have
		 * one possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail;
		}
	}

	/* One CRTC per stream; the first max_streams planes act as the
	 * primary plane of the matching CRTC. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		/* Boot-time detection so already-connected displays get a
		 * valid sink before the first modeset. */
		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
				DETECT_REASON_BOOT))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		/*
		 * Temporary disable until pplib/smu interaction is implemented
		 */
		dm->dc->debug.disable_stutter = true;
		break;
#endif
	default:
		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/* Free the in-flight connector/encoder (if allocation or init
	 * failed mid-loop) plus every plane allocated above. */
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -1;
}
1478
1479 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1480 {
1481         drm_mode_config_cleanup(dm->ddev);
1482         return;
1483 }
1484
1485 /******************************************************************************
1486  * amdgpu_display_funcs functions
1487  *****************************************************************************/
1488
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 * Currently a stub: DC programs watermarks internally, so this hook is
 * intentionally empty (see TODO).
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1500
/*
 * amdgpu_display_funcs.backlight_set_level hook — intentionally a stub;
 * backlight control goes through the backlight class device instead.
 */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1506
/*
 * amdgpu_display_funcs.backlight_get_level hook — stub; always reports 0
 * until the encoder -> display_index translation is implemented.
 */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1512
1513 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1514                                 struct drm_file *filp)
1515 {
1516         struct mod_freesync_params freesync_params;
1517         uint8_t num_streams;
1518         uint8_t i;
1519
1520         struct amdgpu_device *adev = dev->dev_private;
1521         int r = 0;
1522
1523         /* Get freesync enable flag from DRM */
1524
1525         num_streams = dc_get_current_stream_count(adev->dm.dc);
1526
1527         for (i = 0; i < num_streams; i++) {
1528                 struct dc_stream_state *stream;
1529                 stream = dc_get_stream_at_index(adev->dm.dc, i);
1530
1531                 mod_freesync_update_state(adev->dm.freesync_module,
1532                                           &stream, 1, &freesync_params);
1533         }
1534
1535         return r;
1536 }
1537
/* amdgpu display hooks backed by DM/DC; NULL entries are either handled
 * unconditionally elsewhere or covered by VBIOS parsing inside DAL. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1556
1557 #if defined(CONFIG_DEBUG_KERNEL_DC)
1558
1559 static ssize_t s3_debug_store(struct device *device,
1560                               struct device_attribute *attr,
1561                               const char *buf,
1562                               size_t count)
1563 {
1564         int ret;
1565         int s3_state;
1566         struct pci_dev *pdev = to_pci_dev(device);
1567         struct drm_device *drm_dev = pci_get_drvdata(pdev);
1568         struct amdgpu_device *adev = drm_dev->dev_private;
1569
1570         ret = kstrtoint(buf, 0, &s3_state);
1571
1572         if (ret == 0) {
1573                 if (s3_state) {
1574                         dm_resume(adev);
1575                         amdgpu_dm_display_resume(adev);
1576                         drm_kms_helper_hotplug_event(adev->ddev);
1577                 } else
1578                         dm_suspend(adev);
1579         }
1580
1581         return ret == 0 ? count : 0;
1582 }
1583
1584 DEVICE_ATTR_WO(s3_debug);
1585
1586 #endif
1587
1588 static int dm_early_init(void *handle)
1589 {
1590         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1591
1592         adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1593
1594         switch (adev->asic_type) {
1595         case CHIP_BONAIRE:
1596         case CHIP_HAWAII:
1597                 adev->mode_info.num_crtc = 6;
1598                 adev->mode_info.num_hpd = 6;
1599                 adev->mode_info.num_dig = 6;
1600                 adev->mode_info.plane_type = dm_plane_type_default;
1601                 break;
1602         case CHIP_KAVERI:
1603                 adev->mode_info.num_crtc = 4;
1604                 adev->mode_info.num_hpd = 6;
1605                 adev->mode_info.num_dig = 7;
1606                 adev->mode_info.plane_type = dm_plane_type_default;
1607                 break;
1608         case CHIP_KABINI:
1609         case CHIP_MULLINS:
1610                 adev->mode_info.num_crtc = 2;
1611                 adev->mode_info.num_hpd = 6;
1612                 adev->mode_info.num_dig = 6;
1613                 adev->mode_info.plane_type = dm_plane_type_default;
1614                 break;
1615         case CHIP_FIJI:
1616         case CHIP_TONGA:
1617                 adev->mode_info.num_crtc = 6;
1618                 adev->mode_info.num_hpd = 6;
1619                 adev->mode_info.num_dig = 7;
1620                 adev->mode_info.plane_type = dm_plane_type_default;
1621                 break;
1622         case CHIP_CARRIZO:
1623                 adev->mode_info.num_crtc = 3;
1624                 adev->mode_info.num_hpd = 6;
1625                 adev->mode_info.num_dig = 9;
1626                 adev->mode_info.plane_type = dm_plane_type_carizzo;
1627                 break;
1628         case CHIP_STONEY:
1629                 adev->mode_info.num_crtc = 2;
1630                 adev->mode_info.num_hpd = 6;
1631                 adev->mode_info.num_dig = 9;
1632                 adev->mode_info.plane_type = dm_plane_type_stoney;
1633                 break;
1634         case CHIP_POLARIS11:
1635         case CHIP_POLARIS12:
1636                 adev->mode_info.num_crtc = 5;
1637                 adev->mode_info.num_hpd = 5;
1638                 adev->mode_info.num_dig = 5;
1639                 adev->mode_info.plane_type = dm_plane_type_default;
1640                 break;
1641         case CHIP_POLARIS10:
1642                 adev->mode_info.num_crtc = 6;
1643                 adev->mode_info.num_hpd = 6;
1644                 adev->mode_info.num_dig = 6;
1645                 adev->mode_info.plane_type = dm_plane_type_default;
1646                 break;
1647         case CHIP_VEGA10:
1648                 adev->mode_info.num_crtc = 6;
1649                 adev->mode_info.num_hpd = 6;
1650                 adev->mode_info.num_dig = 6;
1651                 adev->mode_info.plane_type = dm_plane_type_default;
1652                 break;
1653 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1654         case CHIP_RAVEN:
1655                 adev->mode_info.num_crtc = 4;
1656                 adev->mode_info.num_hpd = 4;
1657                 adev->mode_info.num_dig = 4;
1658                 adev->mode_info.plane_type = dm_plane_type_default;
1659                 break;
1660 #endif
1661         default:
1662                 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1663                 return -EINVAL;
1664         }
1665
1666         amdgpu_dm_set_irq_funcs(adev);
1667
1668         if (adev->mode_info.funcs == NULL)
1669                 adev->mode_info.funcs = &dm_display_funcs;
1670
1671         /* Note: Do NOT change adev->audio_endpt_rreg and
1672          * adev->audio_endpt_wreg because they are initialised in
1673          * amdgpu_device_init() */
1674 #if defined(CONFIG_DEBUG_KERNEL_DC)
1675         device_create_file(
1676                 adev->ddev->dev,
1677                 &dev_attr_s3_debug);
1678 #endif
1679
1680         return 0;
1681 }
1682
1683 static bool modeset_required(struct drm_crtc_state *crtc_state,
1684                              struct dc_stream_state *new_stream,
1685                              struct dc_stream_state *old_stream)
1686 {
1687         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1688                 return false;
1689
1690         if (!crtc_state->enable)
1691                 return false;
1692
1693         return crtc_state->active;
1694 }
1695
1696 static bool modereset_required(struct drm_crtc_state *crtc_state)
1697 {
1698         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1699                 return false;
1700
1701         return !crtc_state->enable || !crtc_state->active;
1702 }
1703
/* drm_encoder_funcs.destroy callback: tear down the DRM encoder and free
 * the embedding allocation (the encoder was kzalloc'd by this driver). */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
1709
/* Encoder vtable: only destroy is needed, the rest use DRM defaults. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
1713
1714 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1715                                         struct dc_plane_state *plane_state)
1716 {
1717         plane_state->src_rect.x = state->src_x >> 16;
1718         plane_state->src_rect.y = state->src_y >> 16;
1719         /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1720         plane_state->src_rect.width = state->src_w >> 16;
1721
1722         if (plane_state->src_rect.width == 0)
1723                 return false;
1724
1725         plane_state->src_rect.height = state->src_h >> 16;
1726         if (plane_state->src_rect.height == 0)
1727                 return false;
1728
1729         plane_state->dst_rect.x = state->crtc_x;
1730         plane_state->dst_rect.y = state->crtc_y;
1731
1732         if (state->crtc_w == 0)
1733                 return false;
1734
1735         plane_state->dst_rect.width = state->crtc_w;
1736
1737         if (state->crtc_h == 0)
1738                 return false;
1739
1740         plane_state->dst_rect.height = state->crtc_h;
1741
1742         plane_state->clip_rect = plane_state->dst_rect;
1743
1744         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1745         case DRM_MODE_ROTATE_0:
1746                 plane_state->rotation = ROTATION_ANGLE_0;
1747                 break;
1748         case DRM_MODE_ROTATE_90:
1749                 plane_state->rotation = ROTATION_ANGLE_90;
1750                 break;
1751         case DRM_MODE_ROTATE_180:
1752                 plane_state->rotation = ROTATION_ANGLE_180;
1753                 break;
1754         case DRM_MODE_ROTATE_270:
1755                 plane_state->rotation = ROTATION_ANGLE_270;
1756                 break;
1757         default:
1758                 plane_state->rotation = ROTATION_ANGLE_0;
1759                 break;
1760         }
1761
1762         return true;
1763 }
1764 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1765                        uint64_t *tiling_flags)
1766 {
1767         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1768         int r = amdgpu_bo_reserve(rbo, false);
1769
1770         if (unlikely(r)) {
1771                 // Don't show error msg. when return -ERESTARTSYS
1772                 if (r != -ERESTARTSYS)
1773                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1774                 return r;
1775         }
1776
1777         if (tiling_flags)
1778                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1779
1780         amdgpu_bo_unreserve(rbo);
1781
1782         return r;
1783 }
1784
/*
 * Derive DC plane attributes from an amdgpu framebuffer: pixel format,
 * surface/plane sizes, color space and (GFX8/GFX9) tiling information.
 * Returns 0 on success, -EINVAL for an unsupported fourcc, or the errno
 * from reserving the BO.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
                                         struct dc_plane_state *plane_state,
                                         const struct amdgpu_framebuffer *amdgpu_fb)
{
        uint64_t tiling_flags;
        unsigned int awidth;
        const struct drm_framebuffer *fb = &amdgpu_fb->base;
        int ret = 0;
        struct drm_format_name_buf format_name;

        /* Reserve the BO just long enough to read its tiling flags. */
        ret = get_fb_info(
                amdgpu_fb,
                &tiling_flags);

        if (ret)
                return ret;

        /* Map DRM fourcc -> DC surface pixel format. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
                break;
        case DRM_FORMAT_RGB565:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
                break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
                break;
        case DRM_FORMAT_NV21:
                plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
                break;
        case DRM_FORMAT_NV12:
                plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
                break;
        default:
                DRM_ERROR("Unsupported screen format %s\n",
                          drm_get_format_name(fb->format->format, &format_name));
                return -EINVAL;
        }

        if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                /* Packed graphics format: single-plane surface. */
                plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
                plane_state->plane_size.grph.surface_size.x = 0;
                plane_state->plane_size.grph.surface_size.y = 0;
                plane_state->plane_size.grph.surface_size.width = fb->width;
                plane_state->plane_size.grph.surface_size.height = fb->height;
                /* Pitch in pixels, not bytes. */
                plane_state->plane_size.grph.surface_pitch =
                                fb->pitches[0] / fb->format->cpp[0];
                /* TODO: unhardcode */
                plane_state->color_space = COLOR_SPACE_SRGB;

        } else {
                /* Semi-planar video (NV12/NV21): separate luma and chroma
                 * planes; width aligned to 64 pixels. */
                awidth = ALIGN(fb->width, 64);
                plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                plane_state->plane_size.video.luma_size.x = 0;
                plane_state->plane_size.video.luma_size.y = 0;
                plane_state->plane_size.video.luma_size.width = awidth;
                plane_state->plane_size.video.luma_size.height = fb->height;
                /* TODO: unhardcode */
                plane_state->plane_size.video.luma_pitch = awidth;

                plane_state->plane_size.video.chroma_size.x = 0;
                plane_state->plane_size.video.chroma_size.y = 0;
                plane_state->plane_size.video.chroma_size.width = awidth;
                plane_state->plane_size.video.chroma_size.height = fb->height;
                /* 4:2:0 chroma is half-width (in chroma sample pairs). */
                plane_state->plane_size.video.chroma_pitch = awidth / 2;

                /* TODO: unhardcode */
                plane_state->color_space = COLOR_SPACE_YCBCR709;
        }

        memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                plane_state->tiling_info.gfx8.num_banks = num_banks;
                plane_state->tiling_info.gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                plane_state->tiling_info.gfx8.tile_split = tile_split;
                plane_state->tiling_info.gfx8.bank_width = bankw;
                plane_state->tiling_info.gfx8.bank_height = bankh;
                plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
                plane_state->tiling_info.gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        plane_state->tiling_info.gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                plane_state->tiling_info.gfx9.num_pipes =
                        adev->gfx.config.gb_addr_config_fields.num_pipes;
                plane_state->tiling_info.gfx9.num_banks =
                        adev->gfx.config.gb_addr_config_fields.num_banks;
                plane_state->tiling_info.gfx9.pipe_interleave =
                        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
                plane_state->tiling_info.gfx9.num_shader_engines =
                        adev->gfx.config.gb_addr_config_fields.num_se;
                plane_state->tiling_info.gfx9.max_compressed_frags =
                        adev->gfx.config.gb_addr_config_fields.max_compress_frags;
                plane_state->tiling_info.gfx9.num_rb_per_se =
                        adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
                plane_state->tiling_info.gfx9.swizzle =
                        AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
                plane_state->tiling_info.gfx9.shaderEnable = 1;
        }

        plane_state->visible = true;
        plane_state->scaling_quality.h_taps_c = 0;
        plane_state->scaling_quality.v_taps_c = 0;

        /* is this needed? is plane_state zeroed at allocation? */
        plane_state->scaling_quality.h_taps = 0;
        plane_state->scaling_quality.v_taps = 0;
        plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

        return ret;

}
1926
1927 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1928                                        struct dc_plane_state *plane_state)
1929 {
1930         int i;
1931         struct dc_gamma *gamma;
1932         struct drm_color_lut *lut =
1933                         (struct drm_color_lut *) crtc_state->gamma_lut->data;
1934
1935         gamma = dc_create_gamma();
1936
1937         if (gamma == NULL) {
1938                 WARN_ON(1);
1939                 return;
1940         }
1941
1942         gamma->type = GAMMA_RGB_256;
1943         gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1944         for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1945                 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1946                 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1947                 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1948         }
1949
1950         plane_state->gamma_correction = gamma;
1951 }
1952
/*
 * Populate @dc_plane_state from the DRM plane and CRTC state: rectangles,
 * framebuffer-derived attributes, an sRGB input transfer function and,
 * when a gamma LUT is set on the CRTC, the gamma correction.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): @adev is unused; the device is re-derived from
 * crtc->dev->dev_private below -- presumably the same object, confirm.
 */
static int fill_plane_attributes(struct amdgpu_device *adev,
                                 struct dc_plane_state *dc_plane_state,
                                 struct drm_plane_state *plane_state,
                                 struct drm_crtc_state *crtc_state)
{
        const struct amdgpu_framebuffer *amdgpu_fb =
                to_amdgpu_framebuffer(plane_state->fb);
        const struct drm_crtc *crtc = plane_state->crtc;
        struct dc_transfer_func *input_tf;
        int ret = 0;

        /* Zero-sized src/dst rectangles are invalid. */
        if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
                return -EINVAL;

        ret = fill_plane_attributes_from_fb(
                crtc->dev->dev_private,
                dc_plane_state,
                amdgpu_fb);

        if (ret)
                return ret;

        /* Default input transfer function: predefined sRGB. */
        input_tf = dc_create_transfer_func();

        if (input_tf == NULL)
                return -ENOMEM;

        input_tf->type = TF_TYPE_PREDEFINED;
        input_tf->tf = TRANSFER_FUNCTION_SRGB;

        dc_plane_state->in_transfer_func = input_tf;

        /* In case of gamma set, update gamma value */
        if (crtc_state->gamma_lut)
                fill_gamma_from_crtc_state(crtc_state, dc_plane_state);

        return ret;
}
1991
1992 /*****************************************************************************/
1993
1994 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
1995                                            const struct dm_connector_state *dm_state,
1996                                            struct dc_stream_state *stream)
1997 {
1998         enum amdgpu_rmx_type rmx_type;
1999
2000         struct rect src = { 0 }; /* viewport in composition space*/
2001         struct rect dst = { 0 }; /* stream addressable area */
2002
2003         /* no mode. nothing to be done */
2004         if (!mode)
2005                 return;
2006
2007         /* Full screen scaling by default */
2008         src.width = mode->hdisplay;
2009         src.height = mode->vdisplay;
2010         dst.width = stream->timing.h_addressable;
2011         dst.height = stream->timing.v_addressable;
2012
2013         rmx_type = dm_state->scaling;
2014         if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2015                 if (src.width * dst.height <
2016                                 src.height * dst.width) {
2017                         /* height needs less upscaling/more downscaling */
2018                         dst.width = src.width *
2019                                         dst.height / src.height;
2020                 } else {
2021                         /* width needs less upscaling/more downscaling */
2022                         dst.height = src.height *
2023                                         dst.width / src.width;
2024                 }
2025         } else if (rmx_type == RMX_CENTER) {
2026                 dst = src;
2027         }
2028
2029         dst.x = (stream->timing.h_addressable - dst.width) / 2;
2030         dst.y = (stream->timing.v_addressable - dst.height) / 2;
2031
2032         if (dm_state->underscan_enable) {
2033                 dst.x += dm_state->underscan_hborder / 2;
2034                 dst.y += dm_state->underscan_vborder / 2;
2035                 dst.width -= dm_state->underscan_hborder;
2036                 dst.height -= dm_state->underscan_vborder;
2037         }
2038
2039         stream->src = src;
2040         stream->dst = dst;
2041
2042         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2043                         dst.x, dst.y, dst.width, dst.height);
2044
2045 }
2046
/*
 * Map the connector's advertised bpc to a DC color depth.
 * Depth is currently capped at 8 bpc, so the 10/12/14/16 cases below are
 * unreachable until the deep-color TODO is resolved; they are kept so the
 * mapping is complete once the cap is lifted.
 */
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector)
{
        uint32_t bpc = connector->display_info.bpc;

        /* Limited color depth to 8bit
         * TODO: Still need to handle deep color
         */
        if (bpc > 8)
                bpc = 8;

        switch (bpc) {
        case 0:
                /* Temporary Work around, DRM don't parse color depth for
                 * EDID revision before 1.4
                 * TODO: Fix edid parsing
                 */
                return COLOR_DEPTH_888;
        case 6:
                return COLOR_DEPTH_666;
        case 8:
                return COLOR_DEPTH_888;
        case 10:
                return COLOR_DEPTH_101010;
        case 12:
                return COLOR_DEPTH_121212;
        case 14:
                return COLOR_DEPTH_141414;
        case 16:
                return COLOR_DEPTH_161616;
        default:
                return COLOR_DEPTH_UNDEFINED;
        }
}
2081
2082 static enum dc_aspect_ratio
2083 get_aspect_ratio(const struct drm_display_mode *mode_in)
2084 {
2085         int32_t width = mode_in->crtc_hdisplay * 9;
2086         int32_t height = mode_in->crtc_vdisplay * 16;
2087
2088         if ((width - height) < 10 && (width - height) > -10)
2089                 return ASPECT_RATIO_16_9;
2090         else
2091                 return ASPECT_RATIO_4_3;
2092 }
2093
2094 static enum dc_color_space
2095 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2096 {
2097         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2098
2099         switch (dc_crtc_timing->pixel_encoding) {
2100         case PIXEL_ENCODING_YCBCR422:
2101         case PIXEL_ENCODING_YCBCR444:
2102         case PIXEL_ENCODING_YCBCR420:
2103         {
2104                 /*
2105                  * 27030khz is the separation point between HDTV and SDTV
2106                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2107                  * respectively
2108                  */
2109                 if (dc_crtc_timing->pix_clk_khz > 27030) {
2110                         if (dc_crtc_timing->flags.Y_ONLY)
2111                                 color_space =
2112                                         COLOR_SPACE_YCBCR709_LIMITED;
2113                         else
2114                                 color_space = COLOR_SPACE_YCBCR709;
2115                 } else {
2116                         if (dc_crtc_timing->flags.Y_ONLY)
2117                                 color_space =
2118                                         COLOR_SPACE_YCBCR601_LIMITED;
2119                         else
2120                                 color_space = COLOR_SPACE_YCBCR601;
2121                 }
2122
2123         }
2124         break;
2125         case PIXEL_ENCODING_RGB:
2126                 color_space = COLOR_SPACE_SRGB;
2127                 break;
2128
2129         default:
2130                 WARN_ON(1);
2131                 break;
2132         }
2133
2134         return color_space;
2135 }
2136
2137 /*****************************************************************************/
2138
2139 static void
2140 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2141                                              const struct drm_display_mode *mode_in,
2142                                              const struct drm_connector *connector)
2143 {
2144         struct dc_crtc_timing *timing_out = &stream->timing;
2145         struct dc_transfer_func *tf = dc_create_transfer_func();
2146
2147         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2148
2149         timing_out->h_border_left = 0;
2150         timing_out->h_border_right = 0;
2151         timing_out->v_border_top = 0;
2152         timing_out->v_border_bottom = 0;
2153         /* TODO: un-hardcode */
2154
2155         if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2156                         && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2157                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2158         else
2159                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2160
2161         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2162         timing_out->display_color_depth = convert_color_depth_from_display_info(
2163                         connector);
2164         timing_out->scan_type = SCANNING_TYPE_NODATA;
2165         timing_out->hdmi_vic = 0;
2166         timing_out->vic = drm_match_cea_mode(mode_in);
2167
2168         timing_out->h_addressable = mode_in->crtc_hdisplay;
2169         timing_out->h_total = mode_in->crtc_htotal;
2170         timing_out->h_sync_width =
2171                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2172         timing_out->h_front_porch =
2173                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2174         timing_out->v_total = mode_in->crtc_vtotal;
2175         timing_out->v_addressable = mode_in->crtc_vdisplay;
2176         timing_out->v_front_porch =
2177                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2178         timing_out->v_sync_width =
2179                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2180         timing_out->pix_clk_khz = mode_in->crtc_clock;
2181         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2182         if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2183                 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2184         if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2185                 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2186
2187         stream->output_color_space = get_output_color_space(timing_out);
2188
2189         tf->type = TF_TYPE_PREDEFINED;
2190         tf->tf = TRANSFER_FUNCTION_SRGB;
2191         stream->out_transfer_func = tf;
2192 }
2193
/*
 * Copy audio capabilities (display name, audio modes, speaker allocation,
 * latencies) from the sink's parsed EDID caps into @audio_info.
 */
static void fill_audio_info(struct audio_info *audio_info,
                            const struct drm_connector *drm_connector,
                            const struct dc_sink *dc_sink)
{
        int i = 0;
        int cea_revision = 0;
        const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

        audio_info->manufacture_id = edid_caps->manufacturer_id;
        audio_info->product_id = edid_caps->product_id;

        cea_revision = drm_connector->display_info.cea_rev;

        /* NOTE(review): strncpy() with size-1 does not NUL-terminate when
         * the source is that long or longer; this presumably relies on
         * audio_info being zero-initialized by the caller -- confirm. */
        strncpy(audio_info->display_name,
                edid_caps->display_name,
                AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);

        /* Short Audio Descriptors exist only from CEA-861 revision 3 on. */
        if (cea_revision >= 3) {
                audio_info->mode_count = edid_caps->audio_mode_count;

                for (i = 0; i < audio_info->mode_count; ++i) {
                        audio_info->modes[i].format_code =
                                        (enum audio_format_code)
                                        (edid_caps->audio_modes[i].format_code);
                        audio_info->modes[i].channel_count =
                                        edid_caps->audio_modes[i].channel_count;
                        audio_info->modes[i].sample_rates.all =
                                        edid_caps->audio_modes[i].sample_rate;
                        audio_info->modes[i].sample_size =
                                        edid_caps->audio_modes[i].sample_size;
                }
        }

        audio_info->flags.all = edid_caps->speaker_flags;

        /* TODO: We only check for the progressive mode, check for interlace mode too */
        if (drm_connector->latency_present[0]) {
                audio_info->video_latency = drm_connector->video_latency[0];
                audio_info->audio_latency = drm_connector->audio_latency[0];
        }

        /* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
2238
2239 static void
2240 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2241                                       struct drm_display_mode *dst_mode)
2242 {
2243         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2244         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2245         dst_mode->crtc_clock = src_mode->crtc_clock;
2246         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2247         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2248         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2249         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2250         dst_mode->crtc_htotal = src_mode->crtc_htotal;
2251         dst_mode->crtc_hskew = src_mode->crtc_hskew;
2252         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2253         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2254         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2255         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2256         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2257 }
2258
2259 static void
2260 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2261                                         const struct drm_display_mode *native_mode,
2262                                         bool scale_enabled)
2263 {
2264         if (scale_enabled) {
2265                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2266         } else if (native_mode->clock == drm_mode->clock &&
2267                         native_mode->htotal == drm_mode->htotal &&
2268                         native_mode->vtotal == drm_mode->vtotal) {
2269                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2270         } else {
2271                 /* no scaling nor amdgpu inserted, no need to patch */
2272         }
2273 }
2274
2275 static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
2276 {
2277         struct dc_sink *sink = NULL;
2278         struct dc_sink_init_data sink_init_data = { 0 };
2279
2280         sink_init_data.link = aconnector->dc_link;
2281         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2282
2283         sink = dc_sink_create(&sink_init_data);
2284         if (!sink) {
2285                 DRM_ERROR("Failed to create sink!\n");
2286                 return -ENOMEM;
2287         }
2288
2289         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2290         aconnector->fake_enable = true;
2291
2292         aconnector->dc_sink = sink;
2293         aconnector->dc_link->local_sink = sink;
2294
2295         return 0;
2296 }
2297
2298 static void set_multisync_trigger_params(
2299                 struct dc_stream_state *stream)
2300 {
2301         if (stream->triggered_crtc_reset.enabled) {
2302                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2303                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2304         }
2305 }
2306
2307 static void set_master_stream(struct dc_stream_state *stream_set[],
2308                               int stream_count)
2309 {
2310         int j, highest_rfr = 0, master_stream = 0;
2311
2312         for (j = 0;  j < stream_count; j++) {
2313                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2314                         int refresh_rate = 0;
2315
2316                         refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2317                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2318                         if (refresh_rate > highest_rfr) {
2319                                 highest_rfr = refresh_rate;
2320                                 master_stream = j;
2321                         }
2322                 }
2323         }
2324         for (j = 0;  j < stream_count; j++) {
2325                 if (stream_set[j] && j != master_stream)
2326                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2327         }
2328 }
2329
2330 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2331 {
2332         int i = 0;
2333
2334         if (context->stream_count < 2)
2335                 return;
2336         for (i = 0; i < context->stream_count ; i++) {
2337                 if (!context->streams[i])
2338                         continue;
2339                 /* TODO: add a function to read AMD VSDB bits and will set
2340                  * crtc_sync_master.multi_sync_enabled flag
2341                  * For now its set to false
2342                  */
2343                 set_multisync_trigger_params(context->streams[i]);
2344         }
2345         set_master_stream(context->streams, context->stream_count);
2346 }
2347
/*
 * Create a dc_stream_state for the connector's sink, based on @drm_mode
 * patched against the connector's preferred (native) mode and the
 * connector state's scaling/underscan settings.
 *
 * Creates a fake sink (or kicks off MST sink creation) when the connector
 * has no dc_sink yet. Returns the new stream, or NULL on failure; all
 * error paths fall through the labels at the bottom and return whatever
 * @stream currently holds.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                       const struct dm_connector_state *dm_state)
{
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
        struct dc_stream_state *stream = NULL;
        /* Local copy so timing patching doesn't touch the caller's mode. */
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;

        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
                goto drm_connector_null;
        }

        if (dm_state == NULL) {
                DRM_ERROR("dm_state is NULL!\n");
                goto dm_state_null;
        }

        drm_connector = &aconnector->base;

        if (!aconnector->dc_sink) {
                /*
                 * Create dc_sink when necessary to MST
                 * Don't apply fake_sink to MST
                 */
                if (aconnector->mst_port) {
                        dm_dp_mst_dc_sink_create(drm_connector);
                        goto mst_dc_sink_create_done;
                }

                if (create_fake_sink(aconnector))
                        goto stream_create_fail;
        }

        stream = dc_create_stream_for_sink(aconnector->dc_sink);

        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto stream_create_fail;
        }

        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                        native_mode_found = true;
                        break;
                }
        }
        /* Fall back to the first listed mode if none is preferred. */
        if (!native_mode_found)
                preferred_mode = list_first_entry_or_null(
                                &aconnector->base.modes,
                                struct drm_display_mode,
                                head);

        if (preferred_mode == NULL) {
                /* This may not be an error, the use case is when we have no
                 * usermode calls to reset and set mode upon hotplug. In this
                 * case, we call set mode ourselves to restore the previous mode
                 * and the modelist may not be filled in in time.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
                decide_crtc_timing_for_drm_display_mode(
                                &mode, preferred_mode,
                                dm_state->scaling != RMX_OFF);
        }

        /* Convert the (possibly patched) mode into stream properties. */
        fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base);
        update_stream_scaling_settings(&mode, dm_state, stream);

        fill_audio_info(
                &stream->audio_info,
                drm_connector,
                aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
mst_dc_sink_create_done:
        return stream;
}
2433
/* drm_crtc_funcs.destroy hook: release DRM core state and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}
2439
2440 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2441                                   struct drm_crtc_state *state)
2442 {
2443         struct dm_crtc_state *cur = to_dm_crtc_state(state);
2444
2445         /* TODO Destroy dc_stream objects are stream object is flattened */
2446         if (cur->stream)
2447                 dc_stream_release(cur->stream);
2448
2449
2450         __drm_atomic_helper_crtc_destroy_state(state);
2451
2452
2453         kfree(state);
2454 }
2455
2456 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2457 {
2458         struct dm_crtc_state *state;
2459
2460         if (crtc->state)
2461                 dm_crtc_destroy_state(crtc, crtc->state);
2462
2463         state = kzalloc(sizeof(*state), GFP_KERNEL);
2464         if (WARN_ON(!state))
2465                 return;
2466
2467         crtc->state = &state->base;
2468         crtc->state->crtc = crtc;
2469
2470 }
2471
2472 static struct drm_crtc_state *
2473 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2474 {
2475         struct dm_crtc_state *state, *cur;
2476
2477         cur = to_dm_crtc_state(crtc->state);
2478
2479         if (WARN_ON(!crtc->state))
2480                 return NULL;
2481
2482         state = kzalloc(sizeof(*state), GFP_KERNEL);
2483         if (!state)
2484                 return NULL;
2485
2486         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2487
2488         if (cur->stream) {
2489                 state->stream = cur->stream;
2490                 dc_stream_retain(state->stream);
2491         }
2492
2493         /* TODO Duplicate dc_stream after objects are stream object is flattened */
2494
2495         return &state->base;
2496 }
2497
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
};
2508
2509 static enum drm_connector_status
2510 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2511 {
2512         bool connected;
2513         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2514
2515         /* Notes:
2516          * 1. This interface is NOT called in context of HPD irq.
2517          * 2. This interface *is called* in context of user-mode ioctl. Which
2518          * makes it a bad place for *any* MST-related activit. */
2519
2520         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2521             !aconnector->fake_enable)
2522                 connected = (aconnector->dc_sink != NULL);
2523         else
2524                 connected = (aconnector->base.force == DRM_FORCE_ON);
2525
2526         return (connected ? connector_status_connected :
2527                         connector_status_disconnected);
2528 }
2529
2530 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2531                                             struct drm_connector_state *connector_state,
2532                                             struct drm_property *property,
2533                                             uint64_t val)
2534 {
2535         struct drm_device *dev = connector->dev;
2536         struct amdgpu_device *adev = dev->dev_private;
2537         struct dm_connector_state *dm_old_state =
2538                 to_dm_connector_state(connector->state);
2539         struct dm_connector_state *dm_new_state =
2540                 to_dm_connector_state(connector_state);
2541
2542         int ret = -EINVAL;
2543
2544         if (property == dev->mode_config.scaling_mode_property) {
2545                 enum amdgpu_rmx_type rmx_type;
2546
2547                 switch (val) {
2548                 case DRM_MODE_SCALE_CENTER:
2549                         rmx_type = RMX_CENTER;
2550                         break;
2551                 case DRM_MODE_SCALE_ASPECT:
2552                         rmx_type = RMX_ASPECT;
2553                         break;
2554                 case DRM_MODE_SCALE_FULLSCREEN:
2555                         rmx_type = RMX_FULL;
2556                         break;
2557                 case DRM_MODE_SCALE_NONE:
2558                 default:
2559                         rmx_type = RMX_OFF;
2560                         break;
2561                 }
2562
2563                 if (dm_old_state->scaling == rmx_type)
2564                         return 0;
2565
2566                 dm_new_state->scaling = rmx_type;
2567                 ret = 0;
2568         } else if (property == adev->mode_info.underscan_hborder_property) {
2569                 dm_new_state->underscan_hborder = val;
2570                 ret = 0;
2571         } else if (property == adev->mode_info.underscan_vborder_property) {
2572                 dm_new_state->underscan_vborder = val;
2573                 ret = 0;
2574         } else if (property == adev->mode_info.underscan_property) {
2575                 dm_new_state->underscan_enable = val;
2576                 ret = 0;
2577         }
2578
2579         return ret;
2580 }
2581
2582 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2583                                             const struct drm_connector_state *state,
2584                                             struct drm_property *property,
2585                                             uint64_t *val)
2586 {
2587         struct drm_device *dev = connector->dev;
2588         struct amdgpu_device *adev = dev->dev_private;
2589         struct dm_connector_state *dm_state =
2590                 to_dm_connector_state(state);
2591         int ret = -EINVAL;
2592
2593         if (property == dev->mode_config.scaling_mode_property) {
2594                 switch (dm_state->scaling) {
2595                 case RMX_CENTER:
2596                         *val = DRM_MODE_SCALE_CENTER;
2597                         break;
2598                 case RMX_ASPECT:
2599                         *val = DRM_MODE_SCALE_ASPECT;
2600                         break;
2601                 case RMX_FULL:
2602                         *val = DRM_MODE_SCALE_FULLSCREEN;
2603                         break;
2604                 case RMX_OFF:
2605                 default:
2606                         *val = DRM_MODE_SCALE_NONE;
2607                         break;
2608                 }
2609                 ret = 0;
2610         } else if (property == adev->mode_info.underscan_hborder_property) {
2611                 *val = dm_state->underscan_hborder;
2612                 ret = 0;
2613         } else if (property == adev->mode_info.underscan_vborder_property) {
2614                 *val = dm_state->underscan_vborder;
2615                 ret = 0;
2616         } else if (property == adev->mode_info.underscan_property) {
2617                 *val = dm_state->underscan_enable;
2618                 ret = 0;
2619         }
2620         return ret;
2621 }
2622
/*
 * drm_connector_funcs.destroy hook: tear down the backlight device for
 * panel signals (eDP/LVDS), then unregister and free the connector.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
                /*
                 * NOTE(review): registering immediately before unregistering
                 * looks odd -- presumably this guarantees dm->backlight_dev
                 * is populated before teardown; confirm against
                 * amdgpu_dm_register_backlight_device().
                 */
                amdgpu_dm_register_backlight_device(dm);

                if (dm->backlight_dev) {
                        backlight_device_unregister(dm->backlight_dev);
                        dm->backlight_dev = NULL;
                }

        }
#endif
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
}
2646
2647 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2648 {
2649         struct dm_connector_state *state =
2650                 to_dm_connector_state(connector->state);
2651
2652         kfree(state);
2653
2654         state = kzalloc(sizeof(*state), GFP_KERNEL);
2655
2656         if (state) {
2657                 state->scaling = RMX_OFF;
2658                 state->underscan_enable = false;
2659                 state->underscan_hborder = 0;
2660                 state->underscan_vborder = 0;
2661
2662                 connector->state = &state->base;
2663                 connector->state->connector = connector;
2664         }
2665 }
2666
2667 struct drm_connector_state *
2668 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2669 {
2670         struct dm_connector_state *state =
2671                 to_dm_connector_state(connector->state);
2672
2673         struct dm_connector_state *new_state =
2674                         kmemdup(state, sizeof(*state), GFP_KERNEL);
2675
2676         if (new_state) {
2677                 __drm_atomic_helper_connector_duplicate_state(connector,
2678                                                               &new_state->base);
2679                 return &new_state->base;
2680         }
2681
2682         return NULL;
2683 }
2684
/* Connector vtable: detection, property handling, and state lifecycle. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2695
2696 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2697 {
2698         int enc_id = connector->encoder_ids[0];
2699         struct drm_mode_object *obj;
2700         struct drm_encoder *encoder;
2701
2702         DRM_DEBUG_DRIVER("Finding the best encoder\n");
2703
2704         /* pick the encoder ids */
2705         if (enc_id) {
2706                 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2707                 if (!obj) {
2708                         DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2709                         return NULL;
2710                 }
2711                 encoder = obj_to_encoder(obj);
2712                 return encoder;
2713         }
2714         DRM_ERROR("No encoder id\n");
2715         return NULL;
2716 }
2717
/* drm_connector_helper_funcs.get_modes hook: thin wrapper over the DM impl. */
static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}
2722
/*
 * Create an emulated (remote) sink from the connector's override EDID blob,
 * for connectors forced on without a physically attached display.  If no
 * EDID blob is present the connector is forced OFF instead.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
                                aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        /* EDID length is (extensions + 1) blocks of EDID_LENGTH bytes. */
        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        /* Prefer a real local sink over the emulated one when forced on. */
        if (aconnector->base.force == DRM_FORCE_ON)
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
}
2755
/*
 * Set up EDID management for a forced connector: pretend full DP link
 * capabilities where needed and create the emulated sink.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /* In case of headless boot with force on for DP managed connector
         * Those settings have to be != 0 to get initial modeset
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }


        aconnector->base.override_edid = true;
        create_eml_sink(aconnector);
}
2772
/*
 * drm_connector_helper_funcs.mode_valid hook: accept a mode only if DC can
 * validate a stream built from it.  Interlaced and doublescan modes are
 * rejected outright.
 */
int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
{
        int result = MODE_ERROR;
        struct dc_sink *dc_sink;
        struct amdgpu_device *adev = connector->dev->dev_private;
        /* TODO: Unhardcode stream count */
        struct dc_stream_state *stream;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        /* Only run this the first time mode_valid is called to initialize
         * EDID mgmt
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
                !aconnector->dc_em_sink)
                handle_edid_mgmt(aconnector);

        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

        if (dc_sink == NULL) {
                DRM_ERROR("dc_sink is NULL!\n");
                goto fail;
        }

        /* Build a throwaway stream purely for validation. */
        stream = dc_create_stream_for_sink(dc_sink);
        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto fail;
        }

        drm_mode_set_crtcinfo(mode, 0);
        fill_stream_properties_from_drm_display_mode(stream, mode, connector);

        /* No scaling: source and destination rectangles are identical. */
        stream->src.width = mode->hdisplay;
        stream->src.height = mode->vdisplay;
        stream->dst = stream->src;

        if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
                result = MODE_OK;

        dc_stream_release(stream);

fail:
        /* TODO: error handling*/
        return result;
}
2823
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If a second, bigger display is hotplugged in fbcon mode, the bigger
         * resolution modes will be filtered out by drm_mode_validate_size(),
         * and those modes will be missing after the user starts lightdm.  So
         * we need to renew the modes list in the get_modes callback, not just
         * return the modes count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .best_encoder = best_encoder
};
2836
/*
 * drm_crtc_helper_funcs.disable hook: intentionally empty -- presumably the
 * actual disable happens in the atomic commit path and the helper only needs
 * a non-NULL hook; confirm against the commit tail code.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2840
2841 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2842                                        struct drm_crtc_state *state)
2843 {
2844         struct amdgpu_device *adev = crtc->dev->dev_private;
2845         struct dc *dc = adev->dm.dc;
2846         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2847         int ret = -EINVAL;
2848
2849         if (unlikely(!dm_crtc_state->stream &&
2850                      modeset_required(state, NULL, dm_crtc_state->stream))) {
2851                 WARN_ON(1);
2852                 return ret;
2853         }
2854
2855         /* In some use cases, like reset, no stream  is attached */
2856         if (!dm_crtc_state->stream)
2857                 return 0;
2858
2859         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2860                 return 0;
2861
2862         return ret;
2863 }
2864
/* drm_crtc_helper_funcs.mode_fixup hook: no fixup needed, accept as-is. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        return true;
}
2871
/* CRTC helper vtable used by the DRM atomic helpers. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup
};
2877
/* drm_encoder_helper_funcs.disable hook: intentionally a no-op. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2882
/* drm_encoder_helper_funcs.atomic_check hook: nothing to validate here. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        return 0;
}
2889
/* Encoder helper vtable (non-static: referenced from other DM files). */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};
2894
2895 static void dm_drm_plane_reset(struct drm_plane *plane)
2896 {
2897         struct dm_plane_state *amdgpu_state = NULL;
2898
2899         if (plane->state)
2900                 plane->funcs->atomic_destroy_state(plane, plane->state);
2901
2902         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2903         WARN_ON(amdgpu_state == NULL);
2904         
2905         if (amdgpu_state) {
2906                 plane->state = &amdgpu_state->base;
2907                 plane->state->plane = plane;
2908                 plane->state->rotation = DRM_MODE_ROTATE_0;
2909         }
2910 }
2911
2912 static struct drm_plane_state *
2913 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2914 {
2915         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2916
2917         old_dm_plane_state = to_dm_plane_state(plane->state);
2918         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2919         if (!dm_plane_state)
2920                 return NULL;
2921
2922         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2923
2924         if (old_dm_plane_state->dc_state) {
2925                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2926                 dc_plane_state_retain(dm_plane_state->dc_state);
2927         }
2928
2929         return &dm_plane_state->base;
2930 }
2931
/*
 * drm_plane_funcs.atomic_destroy_state hook: drop the dc_plane_state
 * reference held by this state, then let the DRM helper free the rest.
 */
void dm_drm_plane_destroy_state(struct drm_plane *plane,
                                struct drm_plane_state *state)
{
        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

        if (dm_plane_state->dc_state)
                dc_plane_state_release(dm_plane_state->dc_state);

        drm_atomic_helper_plane_destroy_state(plane, state);
}
2942
/* Plane vtable: atomic helpers for updates, DM hooks for state lifecycle. */
static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = drm_plane_cleanup,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
};
2951
/*
 * drm_plane_helper_funcs.prepare_fb hook: pin the framebuffer BO into VRAM,
 * record its GPU addresses in the new dc_plane_state, and keep an extra BO
 * reference that dm_plane_helper_cleanup_fb() drops later.
 *
 * Returns 0 on success or a negative errno from the reserve/pin path.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_bo *rbo;
        uint64_t chroma_addr = 0;
        int r;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        unsigned int awidth;

        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        /* Disabling the plane: nothing to pin. */
        if (!new_state->fb) {
                DRM_DEBUG_DRIVER("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);

        obj = afb->obj;
        rbo = gem_to_amdgpu_bo(obj);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;

        /* Pin to VRAM; the resulting GPU address lands in afb->address. */
        r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);


        amdgpu_bo_unreserve(rbo);

        if (unlikely(r != 0)) {
                /* -ERESTARTSYS just means the wait was interrupted: no log. */
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                return r;
        }

        /* Extra reference released in dm_plane_helper_cleanup_fb(). */
        amdgpu_bo_ref(rbo);

        /* Only (re)program addresses when this commit carries a new dc_state. */
        if (dm_plane_state_new->dc_state &&
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

                if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                        plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
                        plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
                } else {
                        /*
                         * Video surface: assumes a two-plane layout where the
                         * chroma plane follows a luma plane of 64-byte-aligned
                         * width x fb height (NV12-style) -- TODO confirm.
                         */
                        awidth = ALIGN(new_state->fb->width, 64);
                        plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                        plane_state->address.video_progressive.luma_addr.low_part
                                                        = lower_32_bits(afb->address);
                        plane_state->address.video_progressive.luma_addr.high_part
                                                        = upper_32_bits(afb->address);
                        chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
                        plane_state->address.video_progressive.chroma_addr.low_part
                                                        = lower_32_bits(chroma_addr);
                        plane_state->address.video_progressive.chroma_addr.high_part
                                                        = upper_32_bits(chroma_addr);
                }
        }

        /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
         * prepare and cleanup in drm_atomic_helper_prepare_planes
         * and drm_atomic_helper_cleanup_planes because fb doesn't exist in s3.
         * In 4.10 kernel this code should be removed and amdgpu_device_suspend
         * code touching frame buffers should be avoided for DC.
         */
        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

                acrtc->cursor_bo = obj;
        }
        return 0;
}
3027
/*
 * drm_plane_helper_funcs.cleanup_fb hook: unpin the old framebuffer BO and
 * drop the extra reference taken in dm_plane_helper_prepare_fb().
 */
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        struct amdgpu_framebuffer *afb;
        int r;

        if (!old_state->fb)
                return;

        afb = to_amdgpu_framebuffer(old_state->fb);
        rbo = gem_to_amdgpu_bo(afb->obj);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
        amdgpu_bo_unref(&rbo);
}
3050
3051 static int dm_plane_atomic_check(struct drm_plane *plane,
3052                                  struct drm_plane_state *state)
3053 {
3054         struct amdgpu_device *adev = plane->dev->dev_private;
3055         struct dc *dc = adev->dm.dc;
3056         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3057
3058         if (!dm_plane_state->dc_state)
3059                 return 0;
3060
3061         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3062                 return 0;
3063
3064         return -EINVAL;
3065 }
3066
/* Plane helper vtable: FB pin/unpin plus DC-backed validation. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
};
3072
3073 /*
3074  * TODO: these are currently initialized to rgb formats only.
3075  * For future use cases we should either initialize them dynamically based on
3076  * plane capabilities, or initialize this array to all formats, so internal drm
3077  * check will succeed, and let DC to implement proper check
3078  */
/* Pixel formats exposed on primary planes (RGB only for now, see TODO above). */
static const uint32_t rgb_formats[] = {
        DRM_FORMAT_RGB888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
};
3089
/* Pixel formats exposed on overlay planes. */
static const uint32_t yuv_formats[] = {
        DRM_FORMAT_NV12,
        DRM_FORMAT_NV21,
};
3094
/* Pixel formats exposed on cursor planes. */
static const u32 cursor_formats[] = {
        DRM_FORMAT_ARGB8888
};
3098
3099 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3100                                 struct amdgpu_plane *aplane,
3101                                 unsigned long possible_crtcs)
3102 {
3103         int res = -EPERM;
3104
3105         switch (aplane->base.type) {
3106         case DRM_PLANE_TYPE_PRIMARY:
3107                 aplane->base.format_default = true;
3108
3109                 res = drm_universal_plane_init(
3110                                 dm->adev->ddev,
3111                                 &aplane->base,
3112                                 possible_crtcs,
3113                                 &dm_plane_funcs,
3114                                 rgb_formats,
3115                                 ARRAY_SIZE(rgb_formats),
3116                                 NULL, aplane->base.type, NULL);
3117                 break;
3118         case DRM_PLANE_TYPE_OVERLAY:
3119                 res = drm_universal_plane_init(
3120                                 dm->adev->ddev,
3121                                 &aplane->base,
3122                                 possible_crtcs,
3123                                 &dm_plane_funcs,
3124                                 yuv_formats,
3125                                 ARRAY_SIZE(yuv_formats),
3126                                 NULL, aplane->base.type, NULL);
3127                 break;
3128         case DRM_PLANE_TYPE_CURSOR:
3129                 res = drm_universal_plane_init(
3130                                 dm->adev->ddev,
3131                                 &aplane->base,
3132                                 possible_crtcs,
3133                                 &dm_plane_funcs,
3134                                 cursor_formats,
3135                                 ARRAY_SIZE(cursor_formats),
3136                                 NULL, aplane->base.type, NULL);
3137                 break;
3138         }
3139
3140         drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3141
3142         /* Create (reset) the plane state */
3143         if (aplane->base.funcs->reset)
3144                 aplane->base.funcs->reset(&aplane->base);
3145
3146
3147         return res;
3148 }
3149
3150 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3151                                struct drm_plane *plane,
3152                                uint32_t crtc_index)
3153 {
3154         struct amdgpu_crtc *acrtc = NULL;
3155         struct amdgpu_plane *cursor_plane;
3156
3157         int res = -ENOMEM;
3158
3159         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3160         if (!cursor_plane)
3161                 goto fail;
3162
3163         cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3164         res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3165
3166         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3167         if (!acrtc)
3168                 goto fail;
3169
3170         res = drm_crtc_init_with_planes(
3171                         dm->ddev,
3172                         &acrtc->base,
3173                         plane,
3174                         &cursor_plane->base,
3175                         &amdgpu_dm_crtc_funcs, NULL);
3176
3177         if (res)
3178                 goto fail;
3179
3180         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3181
3182         /* Create (reset) the plane state */
3183         if (acrtc->base.funcs->reset)
3184                 acrtc->base.funcs->reset(&acrtc->base);
3185
3186         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3187         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3188
3189         acrtc->crtc_id = crtc_index;
3190         acrtc->base.enabled = false;
3191
3192         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3193         drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3194
3195         return 0;
3196
3197 fail:
3198         kfree(acrtc);
3199         kfree(cursor_plane);
3200         return res;
3201 }
3202
3203
3204 static int to_drm_connector_type(enum signal_type st)
3205 {
3206         switch (st) {
3207         case SIGNAL_TYPE_HDMI_TYPE_A:
3208                 return DRM_MODE_CONNECTOR_HDMIA;
3209         case SIGNAL_TYPE_EDP:
3210                 return DRM_MODE_CONNECTOR_eDP;
3211         case SIGNAL_TYPE_RGB:
3212                 return DRM_MODE_CONNECTOR_VGA;
3213         case SIGNAL_TYPE_DISPLAY_PORT:
3214         case SIGNAL_TYPE_DISPLAY_PORT_MST:
3215                 return DRM_MODE_CONNECTOR_DisplayPort;
3216         case SIGNAL_TYPE_DVI_DUAL_LINK:
3217         case SIGNAL_TYPE_DVI_SINGLE_LINK:
3218                 return DRM_MODE_CONNECTOR_DVID;
3219         case SIGNAL_TYPE_VIRTUAL:
3220                 return DRM_MODE_CONNECTOR_VIRTUAL;
3221
3222         default:
3223                 return DRM_MODE_CONNECTOR_Unknown;
3224         }
3225 }
3226
3227 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3228 {
3229         const struct drm_connector_helper_funcs *helper =
3230                 connector->helper_private;
3231         struct drm_encoder *encoder;
3232         struct amdgpu_encoder *amdgpu_encoder;
3233
3234         encoder = helper->best_encoder(connector);
3235
3236         if (encoder == NULL)
3237                 return;
3238
3239         amdgpu_encoder = to_amdgpu_encoder(encoder);
3240
3241         amdgpu_encoder->native_mode.clock = 0;
3242
3243         if (!list_empty(&connector->probed_modes)) {
3244                 struct drm_display_mode *preferred_mode = NULL;
3245
3246                 list_for_each_entry(preferred_mode,
3247                                     &connector->probed_modes,
3248                                     head) {
3249                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3250                                 amdgpu_encoder->native_mode = *preferred_mode;
3251
3252                         break;
3253                 }
3254
3255         }
3256 }
3257
3258 static struct drm_display_mode *
3259 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3260                              char *name,
3261                              int hdisplay, int vdisplay)
3262 {
3263         struct drm_device *dev = encoder->dev;
3264         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3265         struct drm_display_mode *mode = NULL;
3266         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3267
3268         mode = drm_mode_duplicate(dev, native_mode);
3269
3270         if (mode == NULL)
3271                 return NULL;
3272
3273         mode->hdisplay = hdisplay;
3274         mode->vdisplay = vdisplay;
3275         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3276         strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3277
3278         return mode;
3279
3280 }
3281
3282 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3283                                                  struct drm_connector *connector)
3284 {
3285         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3286         struct drm_display_mode *mode = NULL;
3287         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3288         struct amdgpu_dm_connector *amdgpu_dm_connector =
3289                                 to_amdgpu_dm_connector(connector);
3290         int i;
3291         int n;
3292         struct mode_size {
3293                 char name[DRM_DISPLAY_MODE_LEN];
3294                 int w;
3295                 int h;
3296         } common_modes[] = {
3297                 {  "640x480",  640,  480},
3298                 {  "800x600",  800,  600},
3299                 { "1024x768", 1024,  768},
3300                 { "1280x720", 1280,  720},
3301                 { "1280x800", 1280,  800},
3302                 {"1280x1024", 1280, 1024},
3303                 { "1440x900", 1440,  900},
3304                 {"1680x1050", 1680, 1050},
3305                 {"1600x1200", 1600, 1200},
3306                 {"1920x1080", 1920, 1080},
3307                 {"1920x1200", 1920, 1200}
3308         };
3309
3310         n = ARRAY_SIZE(common_modes);
3311
3312         for (i = 0; i < n; i++) {
3313                 struct drm_display_mode *curmode = NULL;
3314                 bool mode_existed = false;
3315
3316                 if (common_modes[i].w > native_mode->hdisplay ||
3317                     common_modes[i].h > native_mode->vdisplay ||
3318                    (common_modes[i].w == native_mode->hdisplay &&
3319                     common_modes[i].h == native_mode->vdisplay))
3320                         continue;
3321
3322                 list_for_each_entry(curmode, &connector->probed_modes, head) {
3323                         if (common_modes[i].w == curmode->hdisplay &&
3324                             common_modes[i].h == curmode->vdisplay) {
3325                                 mode_existed = true;
3326                                 break;
3327                         }
3328                 }
3329
3330                 if (mode_existed)
3331                         continue;
3332
3333                 mode = amdgpu_dm_create_common_mode(encoder,
3334                                 common_modes[i].name, common_modes[i].w,
3335                                 common_modes[i].h);
3336                 drm_mode_probed_add(connector, mode);
3337                 amdgpu_dm_connector->num_modes++;
3338         }
3339 }
3340
3341 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3342                                               struct edid *edid)
3343 {
3344         struct amdgpu_dm_connector *amdgpu_dm_connector =
3345                         to_amdgpu_dm_connector(connector);
3346
3347         if (edid) {
3348                 /* empty probed_modes */
3349                 INIT_LIST_HEAD(&connector->probed_modes);
3350                 amdgpu_dm_connector->num_modes =
3351                                 drm_add_edid_modes(connector, edid);
3352
3353                 amdgpu_dm_get_native_mode(connector);
3354         } else {
3355                 amdgpu_dm_connector->num_modes = 0;
3356         }
3357 }
3358
3359 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3360 {
3361         const struct drm_connector_helper_funcs *helper =
3362                         connector->helper_private;
3363         struct amdgpu_dm_connector *amdgpu_dm_connector =
3364                         to_amdgpu_dm_connector(connector);
3365         struct drm_encoder *encoder;
3366         struct edid *edid = amdgpu_dm_connector->edid;
3367
3368         encoder = helper->best_encoder(connector);
3369
3370         amdgpu_dm_connector_ddc_get_modes(connector, edid);
3371         amdgpu_dm_connector_add_common_modes(encoder, connector);
3372         return amdgpu_dm_connector->num_modes;
3373 }
3374
3375 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3376                                      struct amdgpu_dm_connector *aconnector,
3377                                      int connector_type,
3378                                      struct dc_link *link,
3379                                      int link_index)
3380 {
3381         struct amdgpu_device *adev = dm->ddev->dev_private;
3382
3383         aconnector->connector_id = link_index;
3384         aconnector->dc_link = link;
3385         aconnector->base.interlace_allowed = false;
3386         aconnector->base.doublescan_allowed = false;
3387         aconnector->base.stereo_allowed = false;
3388         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3389         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3390
3391         mutex_init(&aconnector->hpd_lock);
3392
3393         /* configure support HPD hot plug connector_>polled default value is 0
3394          * which means HPD hot plug not supported
3395          */
3396         switch (connector_type) {
3397         case DRM_MODE_CONNECTOR_HDMIA:
3398                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3399                 break;
3400         case DRM_MODE_CONNECTOR_DisplayPort:
3401                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3402                 break;
3403         case DRM_MODE_CONNECTOR_DVID:
3404                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3405                 break;
3406         default:
3407                 break;
3408         }
3409
3410         drm_object_attach_property(&aconnector->base.base,
3411                                 dm->ddev->mode_config.scaling_mode_property,
3412                                 DRM_MODE_SCALE_NONE);
3413
3414         drm_object_attach_property(&aconnector->base.base,
3415                                 adev->mode_info.underscan_property,
3416                                 UNDERSCAN_OFF);
3417         drm_object_attach_property(&aconnector->base.base,
3418                                 adev->mode_info.underscan_hborder_property,
3419                                 0);
3420         drm_object_attach_property(&aconnector->base.base,
3421                                 adev->mode_info.underscan_vborder_property,
3422                                 0);
3423
3424 }
3425
3426 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3427                               struct i2c_msg *msgs, int num)
3428 {
3429         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3430         struct ddc_service *ddc_service = i2c->ddc_service;
3431         struct i2c_command cmd;
3432         int i;
3433         int result = -EIO;
3434
3435         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3436
3437         if (!cmd.payloads)
3438                 return result;
3439
3440         cmd.number_of_payloads = num;
3441         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3442         cmd.speed = 100;
3443
3444         for (i = 0; i < num; i++) {
3445                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3446                 cmd.payloads[i].address = msgs[i].addr;
3447                 cmd.payloads[i].length = msgs[i].len;
3448                 cmd.payloads[i].data = msgs[i].buf;
3449         }
3450
3451         if (dal_i2caux_submit_i2c_command(
3452                         ddc_service->ctx->i2caux,
3453                         ddc_service->ddc_pin,
3454                         &cmd))
3455                 result = num;
3456
3457         kfree(cmd.payloads);
3458         return result;
3459 }
3460
/* Advertise plain I2C transfers plus emulated SMBus support. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
3465
/* i2c algorithm that routes transfers through DC's DDC/aux machinery. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
3470
3471 static struct amdgpu_i2c_adapter *
3472 create_i2c(struct ddc_service *ddc_service,
3473            int link_index,
3474            int *res)
3475 {
3476         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3477         struct amdgpu_i2c_adapter *i2c;
3478
3479         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3480         if (!i2c)
3481                 return NULL;
3482         i2c->base.owner = THIS_MODULE;
3483         i2c->base.class = I2C_CLASS_DDC;
3484         i2c->base.dev.parent = &adev->pdev->dev;
3485         i2c->base.algo = &amdgpu_dm_i2c_algo;
3486         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3487         i2c_set_adapdata(&i2c->base, i2c);
3488         i2c->ddc_service = ddc_service;
3489
3490         return i2c;
3491 }
3492
/* Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 *
 * Creates the i2c adapter, DRM connector and its properties for one
 * dc_link, attaches it to @aencoder, and (for DP/eDP) sets up MST and
 * backlight support.  Returns 0 on success or a negative errno; on
 * failure the i2c adapter memory is freed and aconnector->i2c cleared.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Back-pointer so DC code can reach the DRM connector. */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	/* Create the initial (reset) connector state. */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_mode_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* NOTE: this currently will create backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal but we don't have sink information at this
	 * stage since detection happens after. We can't do detection earlier
	 * since MST detection needs connectors to be created first.
	 */
	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif

out_free:
	/*
	 * NOTE(review): if drm_connector_init() fails after
	 * i2c_add_adapter() succeeded, this frees the adapter without an
	 * i2c_del_adapter() — looks like a leak/use-after-free of the
	 * registered adapter; confirm and fix separately.
	 */
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
3591
3592 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3593 {
3594         switch (adev->mode_info.num_crtc) {
3595         case 1:
3596                 return 0x1;
3597         case 2:
3598                 return 0x3;
3599         case 3:
3600                 return 0x7;
3601         case 4:
3602                 return 0xf;
3603         case 5:
3604                 return 0x1f;
3605         case 6:
3606         default:
3607                 return 0x3f;
3608         }
3609 }
3610
3611 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3612                                   struct amdgpu_encoder *aencoder,
3613                                   uint32_t link_index)
3614 {
3615         struct amdgpu_device *adev = dev->dev_private;
3616
3617         int res = drm_encoder_init(dev,
3618                                    &aencoder->base,
3619                                    &amdgpu_dm_encoder_funcs,
3620                                    DRM_MODE_ENCODER_TMDS,
3621                                    NULL);
3622
3623         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3624
3625         if (!res)
3626                 aencoder->encoder_id = link_index;
3627         else
3628                 aencoder->encoder_id = -1;
3629
3630         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3631
3632         return res;
3633 }
3634
3635 static void manage_dm_interrupts(struct amdgpu_device *adev,
3636                                  struct amdgpu_crtc *acrtc,
3637                                  bool enable)
3638 {
3639         /*
3640          * this is not correct translation but will work as soon as VBLANK
3641          * constant is the same as PFLIP
3642          */
3643         int irq_type =
3644                 amdgpu_crtc_idx_to_irq_type(
3645                         adev,
3646                         acrtc->crtc_id);
3647
3648         if (enable) {
3649                 drm_crtc_vblank_on(&acrtc->base);
3650                 amdgpu_irq_get(
3651                         adev,
3652                         &adev->pageflip_irq,
3653                         irq_type);
3654         } else {
3655
3656                 amdgpu_irq_put(
3657                         adev,
3658                         &adev->pageflip_irq,
3659                         irq_type);
3660                 drm_crtc_vblank_off(&acrtc->base);
3661         }
3662 }
3663
/*
 * Return true when the scaling/underscan configuration changed between
 * the old and new connector state in a way that requires the stream to
 * be reprogrammed.
 */
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	/* Underscan turned off: only a change if borders were in effect. */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		/*
		 * NOTE(review): requiring BOTH borders non-zero looks
		 * suspicious — a single non-zero border also alters the
		 * scaled rect.  Confirm whether '&&' should be '||' here
		 * and in the branch below.
		 */
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		/* Underscan turned on with non-zero borders. */
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
3681
3682 static void remove_stream(struct amdgpu_device *adev,
3683                           struct amdgpu_crtc *acrtc,
3684                           struct dc_stream_state *stream)
3685 {
3686         /* this is the update mode case */
3687         if (adev->dm.freesync_module)
3688                 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3689
3690         acrtc->otg_inst = -1;
3691         acrtc->enabled = false;
3692 }
3693
3694 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3695                                struct dc_cursor_position *position)
3696 {
3697         struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3698         int x, y;
3699         int xorigin = 0, yorigin = 0;
3700
3701         if (!crtc || !plane->state->fb) {
3702                 position->enable = false;
3703                 position->x = 0;
3704                 position->y = 0;
3705                 return 0;
3706         }
3707
3708         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3709             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3710                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3711                           __func__,
3712                           plane->state->crtc_w,
3713                           plane->state->crtc_h);
3714                 return -EINVAL;
3715         }
3716
3717         x = plane->state->crtc_x;
3718         y = plane->state->crtc_y;
3719         /* avivo cursor are offset into the total surface */
3720         x += crtc->primary->state->src_x >> 16;
3721         y += crtc->primary->state->src_y >> 16;
3722         if (x < 0) {
3723                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3724                 x = 0;
3725         }
3726         if (y < 0) {
3727                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3728                 y = 0;
3729         }
3730         position->enable = true;
3731         position->x = x;
3732         position->y = y;
3733         position->x_hotspot = xorigin;
3734         position->y_hotspot = yorigin;
3735
3736         return 0;
3737 }
3738
/*
 * Program (or turn off) the hardware cursor in response to a cursor
 * plane update, pushing position and attributes into DC.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* On cursor-disable the new state has no CRTC; use the old one. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Cursor was off and stays off: nothing to do. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream)
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	/* Cursor surface address and geometry for DC. */
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
	}
}
3795
3796 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3797 {
3798
3799         assert_spin_locked(&acrtc->base.dev->event_lock);
3800         WARN_ON(acrtc->event);
3801
3802         acrtc->event = acrtc->base.state->event;
3803
3804         /* Set the flip status */
3805         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3806
3807         /* Mark this event as consumed */
3808         acrtc->base.state->event = NULL;
3809
3810         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3811                                                  acrtc->crtc_id);
3812 }
3813
/*
 * Executes flip
 *
 * Waits on all BO's fences and for proper vblank count, then submits
 * the new framebuffer address to DC as a single surface update.
 * @target is the absolute vblank count to flip at.
 */
static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb,
			      uint32_t target,
			      struct dc_state *state)
{
	unsigned long flags;
	uint32_t target_vblank;
	int r, vpos, hpos;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	/* TODO eliminate or rename surface_update */
	struct dc_surface_update surface_updates[1] = { {0} };
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);


	/* Prepare wait for target vblank early - before the fence-waits */
	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);

	/* TODO This might fail and hence better not used, wait
	 * explicitly on fences instead
	 * and in general should be called for
	 * blocking commit to as per framework helpers
	 */
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");
		WARN_ON(1);
	}

	/* Wait for all fences on this FB */
	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
								    MAX_SCHEDULE_TIMEOUT) < 0);

	amdgpu_bo_unreserve(abo);

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip: busy-wait (with sleeps) while the CRTC is
	 * enabled, the scanout is inside vblank, and the target vblank has
	 * not yet been reached.
	 */
	while ((acrtc->enabled &&
		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
		usleep_range(1000, 1100);
	}

	/* Flip: everything below runs under the event lock so the flip ISR
	 * sees a consistent acrtc state.
	 */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* update crtc fb */
	crtc->primary->fb = fb;

	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc_state->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;


	if (acrtc->base.state->event)
		prepare_flip_isr(acrtc);

	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
	surface_updates->flip_addr = &addr;


	dc_commit_updates_for_stream(adev->dm.dc,
					     surface_updates,
					     1,
					     acrtc_state->stream,
					     NULL,
					     &surface_updates->surface,
					     state);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);


	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
3909
/*
 * Commit all plane updates belonging to CRTC @pcrtc for this atomic
 * state: cursor updates go through handle_cursor_update(), page flips
 * through amdgpu_dm_do_flip(), and full (modeset-style) plane programs
 * are collected and committed to the stream in one DC call.
 *
 * @wait_for_vblank is cleared when any flip in the state is async,
 * signalling the caller not to wait for vblank.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	int planes_count = 0;
	unsigned long flags;

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool pflip_needed;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor planes take a dedicated path. */
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);
			continue;
		}

		/* Only planes on this CRTC with an FB and active CRTC. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		/* Fast flip path only when no modeset is allowed. */
		pflip_needed = !state->allow_modeset;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  __func__,
				  acrtc_attach->crtc_id);
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed) {
			/* Full program: batch the DC plane state for one
			 * dc_commit_planes_to_stream() call below.
			 */
			WARN_ON(!dm_new_plane_state->dc_state);

			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;
			planes_count++;

		} else if (new_crtc_state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			*wait_for_vblank =
					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
				false : true;

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			amdgpu_dm_do_flip(
				crtc,
				fb,
				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
				dm_state->context);
		}

	}

	if (planes_count) {
		unsigned long flags;

		/* Queue the pageflip event before committing the planes. */
		if (new_pcrtc_state->event) {

			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			prepare_flip_isr(acrtc_attach);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (false == dc_commit_planes_to_stream(dm->dc,
							plane_states_constructed,
							planes_count,
							dc_stream_attach,
							dm_state->context))
			dm_error("%s: Failed to attach plane!\n", __func__);
	} else {
		/*TODO BUG Here should go disable planes on CRTC. */
	}
}
4013
4014 /**
4015  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4016  * @crtc_state: the DRM CRTC state
4017  * @stream_state: the DC stream state.
4018  *
4019  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4020  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4021  */
4022 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4023                                                 struct dc_stream_state *stream_state)
4024 {
4025         stream_state->mode_changed = crtc_state->mode_changed;
4026 }
4027
4028 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4029                                    struct drm_atomic_state *state,
4030                                    bool nonblock)
4031 {
4032         struct drm_crtc *crtc;
4033         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4034         struct amdgpu_device *adev = dev->dev_private;
4035         int i;
4036
4037         /*
4038          * We evade vblanks and pflips on crtc that
4039          * should be changed. We do it here to flush & disable
4040          * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4041          * it will update crtc->dm_crtc_state->stream pointer which is used in
4042          * the ISRs.
4043          */
4044         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4045                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4046                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4047
4048                 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4049                         manage_dm_interrupts(adev, acrtc, false);
4050         }
4051         /* Add check here for SoC's that support hardware cursor plane, to
4052          * unset legacy_cursor_update */
4053
4054         return drm_atomic_helper_commit(dev, state, nonblock);
4055
4056         /*TODO Handle EINTR, reenable IRQ*/
4057 }
4058
/*
 * amdgpu_dm_atomic_commit_tail - program the hardware for a swapped-in state.
 * @state: atomic state to apply; the new state has already been swapped into
 *         the DRM objects by the commit helpers at this point.
 *
 * Sequence: remove/re-add DC streams for modeset CRTCs, commit the DC
 * context, record the OTG instance per CRTC, handle scaling/underscan-only
 * connector changes, re-enable interrupts on modeset CRTCs, commit planes
 * per CRTC, then signal completion and optionally wait for flip done.
 * Statement order is significant throughout (interrupt gating vs. stream
 * pointer updates, event signaling under the event_lock).
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	/* May be cleared per-CRTC below for async (immediate) flips. */
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = to_dm_atomic_state(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnect in fact.
				 * dc_sink in NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			/* Modeset: tear down the old stream before the new
			 * hw_mode takes effect. */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
		}
	} /* for_each_crtc_in_state() */

	/*
	 * Add streams after required streams from new and replaced streams
	 * are removed from freesync module
	 */
	if (adev->dm.freesync_module) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
					      new_crtc_state, i) {
			struct amdgpu_dm_connector *aconnector = NULL;
			struct dm_connector_state *dm_new_con_state = NULL;
			struct amdgpu_crtc *acrtc = NULL;
			bool modeset_needed;

			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
			modeset_needed = modeset_required(
					new_crtc_state,
					dm_new_crtc_state->stream,
					dm_old_crtc_state->stream);
			/* We add stream to freesync if:
			 * 1. Said stream is not null, and
			 * 2. A modeset is requested. This means that the
			 *    stream was removed previously, and needs to be
			 *    replaced.
			 */
			if (dm_new_crtc_state->stream == NULL ||
					!modeset_needed)
				continue;

			acrtc = to_amdgpu_crtc(crtc);

			aconnector =
				amdgpu_dm_find_first_crtc_matching_connector(
					state, crtc);
			if (!aconnector) {
				DRM_DEBUG_DRIVER("Atomic commit: Failed to "
						 "find connector for acrtc "
						 "id:%d skipping freesync "
						 "init\n",
						 acrtc->crtc_id);
				continue;
			}

			mod_freesync_add_stream(adev->dm.freesync_module,
						dm_new_crtc_state->stream,
						&aconnector->caps);
			new_con_state = drm_atomic_get_new_connector_state(
					state, &aconnector->base);
			dm_new_con_state = to_dm_connector_state(new_con_state);

			mod_freesync_set_user_enable(adev->dm.freesync_module,
						     &dm_new_crtc_state->stream,
						     1,
						     &dm_new_con_state->user_enable);
		}
	}

	/* Commit the whole DC context in one shot; a failure here leaves
	 * hw/sw state out of sync, hence the WARN. */
	if (dm_state->context) {
		dm_enable_per_frame_crtc_master_sync(dm_state->context);
		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
	}

	/* Record which OTG instance each CRTC's stream landed on, for the
	 * interrupt handlers. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}

	/* Handle scaling and underscan changes*/
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_stream_status *status = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		/* Skip any thing not scale or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);

		if (!dm_new_crtc_state->stream)
			continue;

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*TODO How it works with MPO ?*/
		if (!dc_commit_planes_to_stream(
				dm->dc,
				status->plane_states,
				status->plane_count,
				dm_new_crtc_state->stream,
				dm_state->context))
			dm_error("%s: Failed to update stream scaling!\n", __func__);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
			new_crtc_state, i) {
		/*
		 * loop to enable interrupts on newly arrived crtc
		 */
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		bool modeset_needed;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		modeset_needed = modeset_required(
				new_crtc_state,
				dm_new_crtc_state->stream,
				dm_old_crtc_state->stream);

		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
			continue;

		if (adev->dm.freesync_module)
			mod_freesync_notify_mode_change(
				adev->dm.freesync_module,
				&dm_new_crtc_state->stream, 1);

		/* Re-enable the interrupts disabled in amdgpu_dm_atomic_commit(). */
		manage_dm_interrupts(adev, acrtc, true);
	}

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
	}


	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
}
4321
4322
4323 static int dm_force_atomic_commit(struct drm_connector *connector)
4324 {
4325         int ret = 0;
4326         struct drm_device *ddev = connector->dev;
4327         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4328         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4329         struct drm_plane *plane = disconnected_acrtc->base.primary;
4330         struct drm_connector_state *conn_state;
4331         struct drm_crtc_state *crtc_state;
4332         struct drm_plane_state *plane_state;
4333
4334         if (!state)
4335                 return -ENOMEM;
4336
4337         state->acquire_ctx = ddev->mode_config.acquire_ctx;
4338
4339         /* Construct an atomic state to restore previous display setting */
4340
4341         /*
4342          * Attach connectors to drm_atomic_state
4343          */
4344         conn_state = drm_atomic_get_connector_state(state, connector);
4345
4346         ret = PTR_ERR_OR_ZERO(conn_state);
4347         if (ret)
4348                 goto err;
4349
4350         /* Attach crtc to drm_atomic_state*/
4351         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4352
4353         ret = PTR_ERR_OR_ZERO(crtc_state);
4354         if (ret)
4355                 goto err;
4356
4357         /* force a restore */
4358         crtc_state->mode_changed = true;
4359
4360         /* Attach plane to drm_atomic_state */
4361         plane_state = drm_atomic_get_plane_state(state, plane);
4362
4363         ret = PTR_ERR_OR_ZERO(plane_state);
4364         if (ret)
4365                 goto err;
4366
4367
4368         /* Call commit internally with the state we just constructed */
4369         ret = drm_atomic_commit(state);
4370         if (!ret)
4371                 return 0;
4372
4373 err:
4374         DRM_ERROR("Restoring old state failed with %i\n", ret);
4375         drm_atomic_state_put(state);
4376
4377         return ret;
4378 }
4379
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when the same display is unplugged then plugged back into the
 * same port and when we are running without usermode desktop manager support
 */
4385 void dm_restore_drm_connector_state(struct drm_device *dev,
4386                                     struct drm_connector *connector)
4387 {
4388         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4389         struct amdgpu_crtc *disconnected_acrtc;
4390         struct dm_crtc_state *acrtc_state;
4391
4392         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4393                 return;
4394
4395         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4396         if (!disconnected_acrtc)
4397                 return;
4398
4399         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4400         if (!acrtc_state->stream)
4401                 return;
4402
4403         /*
4404          * If the previous sink is not released and different from the current,
4405          * we deduce we are in a state where we can not rely on usermode call
4406          * to turn on the display, so we do it here
4407          */
4408         if (acrtc_state->stream->sink != aconnector->dc_sink)
4409                 dm_force_atomic_commit(&aconnector->base);
4410 }
4411
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/* Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases the context, the extra locks we are taking
	 * here will get released along with it.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Grab a reference to the newest commit under the lock so it
		 * cannot be freed while we wait on it below. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/* Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 means timeout; logged only — a later CRTC's wait
		 * result overwrites ret, so a timeout is not propagated. */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* Negative ret (interrupted wait) is returned; timeouts are not. */
	return ret < 0 ? ret : 0;
}
4460
/*
 * dm_update_crtcs_state - add or remove DC streams for changed CRTCs.
 * @dc: display core instance
 * @state: DRM atomic state being checked
 * @enable: true  -> add streams for updated/enabled CRTCs,
 *          false -> remove streams for changed/disabled CRTCs
 * @lock_and_validation_needed: set to true whenever a stream was added or
 *          removed, signalling the caller that full validation under the
 *          global lock is required
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_crtcs_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = NULL;
		struct amdgpu_dm_connector *aconnector = NULL;
		struct drm_connector_state *new_con_state = NULL;
		struct dm_connector_state *dm_conn_state = NULL;

		/* Reset per-iteration; the extra reference from
		 * create_stream_for_sink() is dropped at next_crtc. */
		new_stream = NULL;

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		acrtc = to_amdgpu_crtc(crtc);

		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

		/* TODO This hack should go away */
		if (aconnector && enable) {
			// Make sure fake sink is created in plug-in scenario
			new_con_state = drm_atomic_get_connector_state(state,
								    &aconnector->base);

			if (IS_ERR(new_con_state)) {
				ret = PTR_ERR_OR_ZERO(new_con_state);
				break;
			}

			dm_conn_state = to_dm_connector_state(new_con_state);

			new_stream = create_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							    dm_conn_state);

			/*
			 * we can have no stream on ACTION_SET if a display
			 * was disconnected during S3, in this case it not and
			 * error, the OS will be updated after detection, and
			 * do the right thing on next atomic commit
			 */

			if (!new_stream) {
				/* Not treated as an error: break out with ret
				 * still 0 (see comment above). */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				break;
			}

			/* An equivalent stream means no real mode change:
			 * clear mode_changed so the modeset path is skipped. */
			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
				new_crtc_state->mode_changed = false;
				DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
						 new_crtc_state->mode_changed);
			}
		}

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto next_crtc;

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Remove stream for any changed/disabled CRTC */
		if (!enable) {

			if (!dm_old_crtc_state->stream)
				goto next_crtc;

			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
					crtc->base.id);

			/* i.e. reset mode */
			if (dc_remove_stream_from_ctx(
					dc,
					dm_state->context,
					dm_old_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			/* Drop the old state's reference; the new state no
			 * longer owns a stream. */
			dc_stream_release(dm_old_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;

			*lock_and_validation_needed = true;

		} else {/* Add stream for any updated/enabled CRTC */
			/*
			 * Quick fix to prevent NULL pointer on new_stream when
			 * added MST connectors not found in existing crtc_state in the chained mode
			 * TODO: need to dig out the root cause of that
			 */
			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
				goto next_crtc;

			if (modereset_required(new_crtc_state))
				goto next_crtc;

			if (modeset_required(new_crtc_state, new_stream,
					     dm_old_crtc_state->stream)) {

				WARN_ON(dm_new_crtc_state->stream);

				/* The CRTC state keeps its own reference on
				 * top of the one from create_stream_for_sink()
				 * (which is dropped at next_crtc). */
				dm_new_crtc_state->stream = new_stream;

				dc_stream_retain(new_stream);

				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
							crtc->base.id);

				if (dc_add_stream_to_ctx(
						dc,
						dm_state->context,
						dm_new_crtc_state->stream) != DC_OK) {
					ret = -EINVAL;
					goto fail;
				}

				*lock_and_validation_needed = true;
			}
		}

next_crtc:
		/* Release extra reference */
		if (new_stream)
			 dc_stream_release(new_stream);
	}

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
4615
4616 static int dm_update_planes_state(struct dc *dc,
4617                                   struct drm_atomic_state *state,
4618                                   bool enable,
4619                                   bool *lock_and_validation_needed)
4620 {
4621         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4622         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4623         struct drm_plane *plane;
4624         struct drm_plane_state *old_plane_state, *new_plane_state;
4625         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4626         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4627         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4628         int i ;
4629         /* TODO return page_flip_needed() function */
4630         bool pflip_needed  = !state->allow_modeset;
4631         int ret = 0;
4632
4633         if (pflip_needed)
4634                 return ret;
4635
4636         /* Add new planes */
4637         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4638                 new_plane_crtc = new_plane_state->crtc;
4639                 old_plane_crtc = old_plane_state->crtc;
4640                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4641                 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4642
4643                 /*TODO Implement atomic check for cursor plane */
4644                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4645                         continue;
4646
4647                 /* Remove any changed/removed planes */
4648                 if (!enable) {
4649
4650                         if (!old_plane_crtc)
4651                                 continue;
4652
4653                         old_crtc_state = drm_atomic_get_old_crtc_state(
4654                                         state, old_plane_crtc);
4655                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4656
4657                         if (!dm_old_crtc_state->stream)
4658                                 continue;
4659
4660                         DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4661                                         plane->base.id, old_plane_crtc->base.id);
4662
4663                         if (!dc_remove_plane_from_context(
4664                                         dc,
4665                                         dm_old_crtc_state->stream,
4666                                         dm_old_plane_state->dc_state,
4667                                         dm_state->context)) {
4668
4669                                 ret = EINVAL;
4670                                 return ret;
4671                         }
4672
4673
4674                         dc_plane_state_release(dm_old_plane_state->dc_state);
4675                         dm_new_plane_state->dc_state = NULL;
4676
4677                         *lock_and_validation_needed = true;
4678
4679                 } else { /* Add new planes */
4680
4681                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4682                                 continue;
4683
4684                         if (!new_plane_crtc)
4685                                 continue;
4686
4687                         new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4688                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4689
4690                         if (!dm_new_crtc_state->stream)
4691                                 continue;
4692
4693
4694                         WARN_ON(dm_new_plane_state->dc_state);
4695
4696                         dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4697
4698                         DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4699                                         plane->base.id, new_plane_crtc->base.id);
4700
4701                         if (!dm_new_plane_state->dc_state) {
4702                                 ret = -EINVAL;
4703                                 return ret;
4704                         }
4705
4706                         ret = fill_plane_attributes(
4707                                 new_plane_crtc->dev->dev_private,
4708                                 dm_new_plane_state->dc_state,
4709                                 new_plane_state,
4710                                 new_crtc_state);
4711                         if (ret)
4712                                 return ret;
4713
4714
4715                         if (!dc_add_plane_to_context(
4716                                         dc,
4717                                         dm_new_crtc_state->stream,
4718                                         dm_new_plane_state->dc_state,
4719                                         dm_state->context)) {
4720
4721                                 ret = -EINVAL;
4722                                 return ret;
4723                         }
4724
4725                         /* Tell DC to do a full surface update every time there
4726                          * is a plane change. Inefficient, but works for now.
4727                          */
4728                         dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
4729
4730                         *lock_and_validation_needed = true;
4731                 }
4732         }
4733
4734
4735         return ret;
4736 }
4737
4738 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4739                                   struct drm_atomic_state *state)
4740 {
4741         struct amdgpu_device *adev = dev->dev_private;
4742         struct dc *dc = adev->dm.dc;
4743         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4744         struct drm_connector *connector;
4745         struct drm_connector_state *old_con_state, *new_con_state;
4746         struct drm_crtc *crtc;
4747         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4748         int ret, i;
4749
4750         /*
4751          * This bool will be set for true for any modeset/reset
4752          * or plane update which implies non fast surface update.
4753          */
4754         bool lock_and_validation_needed = false;
4755
4756         ret = drm_atomic_helper_check_modeset(dev, state);
4757         if (ret)
4758                 goto fail;
4759
4760         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4761                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4762                     !new_crtc_state->color_mgmt_changed)
4763                         continue;
4764
4765                 if (!new_crtc_state->enable)
4766                         continue;
4767
4768                 ret = drm_atomic_add_affected_connectors(state, crtc);
4769                 if (ret)
4770                         return ret;
4771
4772                 ret = drm_atomic_add_affected_planes(state, crtc);
4773                 if (ret)
4774                         goto fail;
4775         }
4776
4777         dm_state->context = dc_create_state();
4778         ASSERT(dm_state->context);
4779         dc_resource_state_copy_construct_current(dc, dm_state->context);
4780
4781         /* Remove exiting planes if they are modified */
4782         ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4783         if (ret) {
4784                 goto fail;
4785         }
4786
4787         /* Disable all crtcs which require disable */
4788         ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4789         if (ret) {
4790                 goto fail;
4791         }
4792
4793         /* Enable all crtcs which require enable */
4794         ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4795         if (ret) {
4796                 goto fail;
4797         }
4798
4799         /* Add new/modified planes */
4800         ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4801         if (ret) {
4802                 goto fail;
4803         }
4804
4805         /* Run this here since we want to validate the streams we created */
4806         ret = drm_atomic_helper_check_planes(dev, state);
4807         if (ret)
4808                 goto fail;
4809
4810         /* Check scaling and underscan changes*/
4811         /*TODO Removed scaling changes validation due to inability to commit
4812          * new stream into context w\o causing full reset. Need to
4813          * decide how to handle.
4814          */
4815         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4816                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4817                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4818                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4819
4820                 /* Skip any modesets/resets */
4821                 if (!acrtc || drm_atomic_crtc_needs_modeset(
4822                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4823                         continue;
4824
4825                 /* Skip any thing not scale or underscan changes */
4826                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4827                         continue;
4828
4829                 lock_and_validation_needed = true;
4830         }
4831
4832         /*
4833          * For full updates case when
4834          * removing/adding/updating  streams on once CRTC while flipping
4835          * on another CRTC,
4836          * acquiring global lock  will guarantee that any such full
4837          * update commit
4838          * will wait for completion of any outstanding flip using DRMs
4839          * synchronization events.
4840          */
4841
4842         if (lock_and_validation_needed) {
4843
4844                 ret = do_aquire_global_lock(dev, state);
4845                 if (ret)
4846                         goto fail;
4847
4848                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4849                         ret = -EINVAL;
4850                         goto fail;
4851                 }
4852         }
4853
4854         /* Must be success */
4855         WARN_ON(ret);
4856         return ret;
4857
4858 fail:
4859         if (ret == -EDEADLK)
4860                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
4861         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4862                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4863         else
4864                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4865
4866         return ret;
4867 }
4868
4869 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4870                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
4871 {
4872         uint8_t dpcd_data;
4873         bool capable = false;
4874
4875         if (amdgpu_dm_connector->dc_link &&
4876                 dm_helpers_dp_read_dpcd(
4877                                 NULL,
4878                                 amdgpu_dm_connector->dc_link,
4879                                 DP_DOWN_STREAM_PORT_COUNT,
4880                                 &dpcd_data,
4881                                 sizeof(dpcd_data))) {
4882                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4883         }
4884
4885         return capable;
4886 }
/*
 * Parse FreeSync (variable refresh) capabilities from the sink's EDID and
 * record them on the connector.
 *
 * Only DP/eDP sinks that report DP_MSA_TIMING_PAR_IGNORED (checked via
 * is_dp_capable_without_timing_msa()) are examined. The EDID's detailed
 * timing descriptors are scanned for a monitor range descriptor, and the
 * min/max vertical refresh rates and pixel clock are stored in
 * amdgpu_dm_connector. Reporting the capability upward is still a TODO.
 */
void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
                                           struct edid *edid)
{
        int i;
        uint64_t val_capable;
        bool edid_check_required;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);

        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;

        edid_check_required = false;
        if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
                return;
        }
        if (!adev->dm.freesync_module)
                return;
        /*
         * if edid non zero restrict freesync only for dp and edp
         */
        if (edid) {
                if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
                        || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        edid_check_required = is_dp_capable_without_timing_msa(
                                                adev->dm.dc,
                                                amdgpu_dm_connector);
                }
        }
        val_capable = 0;
        /* EDID 1.2+ is required for the detailed-descriptor layout read below. */
        if (edid_check_required == true && (edid->version > 1 ||
           (edid->version == 1 && edid->revision > 1))) {
                for (i = 0; i < 4; i++) {

                        timing  = &edid->detailed_timings[i];
                        data    = &timing->data.other_data;
                        range   = &data->data.range;
                        /*
                         * Check if monitor has continuous frequency mode
                         */
                        if (data->type != EDID_DETAIL_MONITOR_RANGE)
                                continue;
                        /*
                         * Check for flag range limits only. If flag == 1 then
                         * no additional timing information provided.
                         * Default GTF, GTF Secondary curve and CVT are not
                         * supported
                         */
                        if (range->flags != 1)
                                continue;

                        amdgpu_dm_connector->min_vfreq = range->min_vfreq;
                        amdgpu_dm_connector->max_vfreq = range->max_vfreq;
                        amdgpu_dm_connector->pixel_clock_mhz =
                                range->pixel_clock_mhz * 10;
                        break;
                }

                /*
                 * NOTE(review): if no range descriptor was found above, this
                 * compares whatever min/max_vfreq the connector held from a
                 * previous call — presumably zeroed at init; confirm.
                 * A refresh range wider than 10 Hz is treated as FreeSync-capable.
                 */
                if (amdgpu_dm_connector->max_vfreq -
                                amdgpu_dm_connector->min_vfreq > 10) {
                        amdgpu_dm_connector->caps.supported = true;
                        amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
                                        amdgpu_dm_connector->min_vfreq * 1000000;
                        amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
                                        amdgpu_dm_connector->max_vfreq * 1000000;
                                val_capable = 1;
                }
        }

        /*
         * TODO figure out how to notify user-mode or DRM of freesync caps
         * once we figure out how to deal with freesync in an upstreamable
         * fashion
         */

}
4967
/*
 * Counterpart to amdgpu_dm_add_sink_to_freesync_module(); intentionally a
 * no-op for now (see TODO below). Kept so callers have a stable hook for
 * sink removal once freesync reporting is upstreamed.
 */
void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
{
        /*
         * TODO fill in once we figure out how to deal with freesync in
         * an upstreamable fashion
         */
}
This page took 0.361235 seconds and 4 git commands to generate.