/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

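/*
 * Map the DP dongle type reported in the link's DPCD caps to the
 * corresponding DRM subconnector type, for the dp_subconnector property.
 */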
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

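/*
 * Update the DP subconnector property of a DisplayPort connector, based
 * on whether a sink is attached and which dongle type (if any) the link
 * reports.
 */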
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

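/*
 * Return the current scanout position and the vblank start/end lines for
 * the given CRTC, packed back into the register-style format (low word /
 * high word) the base driver still expects.
 */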
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

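/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing
 * generator) instance; warns and falls back to CRTC 0 for otg_inst == -1.
 */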
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

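/*
 * VRR state helpers: the _irq variant reads the freesync state cached in
 * the IRQ parameters (safe from interrupt context), while the other one
 * inspects a dm_crtc_state.
 */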
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

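/*
 * DC needs a vmin/vmax timing adjustment whenever fixed-rate VRR is
 * requested or the VRR active state differs between old and new state.
 */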
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: pointer to the &struct common_irq_params for this source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

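/*
 * Handle the VUPDATE interrupt: track the measured refresh rate and, in
 * VRR mode, perform the core vblank handling deferred here from
 * dm_crtc_high_irq(), plus BTR processing on pre-DCE12 ASICs.
 */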
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * now that scanout is past the front-porch. This will also
                 * deliver page-flip completion events that have been queued
                 * to us if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and reading out the DMUB trace buffer.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        } while (notify.pending_notification);

                        if (adev->dm.dmub_notify)
                                memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
                        if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
                                complete(&adev->dm.dmub_aux_transfer_done);
                        // TODO: HPD Implementation

                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
                }
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

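/*
 * Audio component callback: copy the ELD of the connector wired to the
 * given audio pin into the caller's buffer and return its size, or 0 if
 * no connector matches.
 */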
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

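/*
 * Bring up the DMUB display microcontroller: copy the firmware and VBIOS
 * images into their framebuffer windows, hand the window layout to the
 * DMUB service, and wait for the firmware to boot.
 */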
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

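/*
 * Translate the GMC view of system memory (framebuffer, AGP and GART
 * apertures, GART page table base) into the physical address space
 * config that DC consumes via dc_setup_system_context().
 */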
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that prevents it from using vram above
                 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase
                 * the system aperture high address by 1 to get rid of the VM
                 * fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

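/*
 * Deferred work that counts CRTCs with vblank IRQs enabled and only
 * allows DC idle (MALL stutter) optimizations when that count is zero.
 */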
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif

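/*
 * Core DM bring-up: create the DC instance from the ASIC description,
 * initialize DMUB, freesync, color management and the optional HDCP and
 * vblank work queues, then register the DRM display structures.
 */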
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                // Call the DC init_memory func
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
                amdgpu_dm_outbox_init(adev);
        }

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

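/*
 * Tear down everything amdgpu_dm_init() created, in roughly reverse
 * order. Also used as the error path of amdgpu_dm_init().
 */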
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        /* Guard against a partially initialized DC instance when called
         * from the amdgpu_dm_init() error path.
         */
        if (adev->dm.dc) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

                if (dc_enable_dmub_notifications(adev->dm.dc)) {
                        kfree(adev->dm.dmub_notify);
                        adev->dm.dmub_notify = NULL;
                }
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

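/*
 * Request the DMCU firmware on ASICs that need a separate image
 * (Picasso/Raven2 and Navi12) and register it for PSP loading; ASICs
 * without one return early with success.
 */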
1369 static int load_dmcu_fw(struct amdgpu_device *adev)
1370 {
1371         const char *fw_name_dmcu = NULL;
1372         int r;
1373         const struct dmcu_firmware_header_v1_0 *hdr;
1374
1375         switch(adev->asic_type) {
1376 #if defined(CONFIG_DRM_AMD_DC_SI)
1377         case CHIP_TAHITI:
1378         case CHIP_PITCAIRN:
1379         case CHIP_VERDE:
1380         case CHIP_OLAND:
1381 #endif
1382         case CHIP_BONAIRE:
1383         case CHIP_HAWAII:
1384         case CHIP_KAVERI:
1385         case CHIP_KABINI:
1386         case CHIP_MULLINS:
1387         case CHIP_TONGA:
1388         case CHIP_FIJI:
1389         case CHIP_CARRIZO:
1390         case CHIP_STONEY:
1391         case CHIP_POLARIS11:
1392         case CHIP_POLARIS10:
1393         case CHIP_POLARIS12:
1394         case CHIP_VEGAM:
1395         case CHIP_VEGA10:
1396         case CHIP_VEGA12:
1397         case CHIP_VEGA20:
1398         case CHIP_NAVI10:
1399         case CHIP_NAVI14:
1400         case CHIP_RENOIR:
1401         case CHIP_SIENNA_CICHLID:
1402         case CHIP_NAVY_FLOUNDER:
1403         case CHIP_DIMGREY_CAVEFISH:
1404         case CHIP_BEIGE_GOBY:
1405         case CHIP_VANGOGH:
1406                 return 0;
1407         case CHIP_NAVI12:
1408                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1409                 break;
1410         case CHIP_RAVEN:
1411                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1412                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1413                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1414                 else
1415                         return 0;
1416                 break;
1418         default:
1419                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1420                 return -EINVAL;
1421         }
1422
1423         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1424                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1425                 return 0;
1426         }
1427
1428         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1429         if (r == -ENOENT) {
1430                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1431                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1432                 adev->dm.fw_dmcu = NULL;
1433                 return 0;
1434         }
1435         if (r) {
1436                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1437                         fw_name_dmcu);
1438                 return r;
1439         }
1440
1441         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1442         if (r) {
1443                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1444                         fw_name_dmcu);
1445                 release_firmware(adev->dm.fw_dmcu);
1446                 adev->dm.fw_dmcu = NULL;
1447                 return r;
1448         }
1449
1450         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
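        /* The DMCU image carries both ERAM code and interrupt vectors (INTV);
         * register the two parts separately and reserve page-aligned space
         * for each.
         */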
1451         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1452         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1453         adev->firmware.fw_size +=
1454                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1455
1456         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1457         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1458         adev->firmware.fw_size +=
1459                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1460
1461         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1462
1463         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1464
1465         return 0;
1466 }
1467
1468 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1469 {
1470         struct amdgpu_device *adev = ctx;
1471
1472         return dm_read_reg(adev->dm.dc->ctx, address);
1473 }
1474
1475 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1476                                      uint32_t value)
1477 {
1478         struct amdgpu_device *adev = ctx;
1479
1480         return dm_write_reg(adev->dm.dc->ctx, address, value);
1481 }
1482
1483 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1484 {
1485         struct dmub_srv_create_params create_params;
1486         struct dmub_srv_region_params region_params;
1487         struct dmub_srv_region_info region_info;
1488         struct dmub_srv_fb_params fb_params;
1489         struct dmub_srv_fb_info *fb_info;
1490         struct dmub_srv *dmub_srv;
1491         const struct dmcub_firmware_header_v1_0 *hdr;
1492         const char *fw_name_dmub;
1493         enum dmub_asic dmub_asic;
1494         enum dmub_status status;
1495         int r;
1496
1497         switch (adev->asic_type) {
1498         case CHIP_RENOIR:
1499                 dmub_asic = DMUB_ASIC_DCN21;
1500                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1501                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1502                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1503                 break;
1504         case CHIP_SIENNA_CICHLID:
1505                 dmub_asic = DMUB_ASIC_DCN30;
1506                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1507                 break;
1508         case CHIP_NAVY_FLOUNDER:
1509                 dmub_asic = DMUB_ASIC_DCN30;
1510                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1511                 break;
1512         case CHIP_VANGOGH:
1513                 dmub_asic = DMUB_ASIC_DCN301;
1514                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1515                 break;
1516         case CHIP_DIMGREY_CAVEFISH:
1517                 dmub_asic = DMUB_ASIC_DCN302;
1518                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1519                 break;
1520         case CHIP_BEIGE_GOBY:
1521                 dmub_asic = DMUB_ASIC_DCN303;
1522                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1523                 break;
1525         default:
1526                 /* ASIC doesn't support DMUB. */
1527                 return 0;
1528         }
1529
1530         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1531         if (r) {
1532                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1533                 return 0;
1534         }
1535
1536         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1537         if (r) {
1538                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1539                 return 0;
1540         }
1541
1542         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1543
1544         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1545
1546         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1547                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1548                         AMDGPU_UCODE_ID_DMCUB;
1549                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1550                         adev->dm.dmub_fw;
1551                 adev->firmware.fw_size +=
1552                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1553
1554                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1555                          adev->dm.dmcub_fw_version);
1556         }
1557
1558         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1559         dmub_srv = adev->dm.dmub_srv;
1560
1561         if (!dmub_srv) {
1562                 DRM_ERROR("Failed to allocate DMUB service!\n");
1563                 return -ENOMEM;
1564         }
1565
1566         memset(&create_params, 0, sizeof(create_params));
1567         create_params.user_ctx = adev;
1568         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1569         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1570         create_params.asic = dmub_asic;
1571
1572         /* Create the DMUB service. */
1573         status = dmub_srv_create(dmub_srv, &create_params);
1574         if (status != DMUB_STATUS_OK) {
1575                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1576                 return -EINVAL;
1577         }
1578
1579         /* Calculate the size of all the regions for the DMUB service. */
1580         memset(&region_params, 0, sizeof(region_params));
1581
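        /* The inst_const section of the firmware blob is wrapped by a PSP
         * header and footer, which must not be mapped for the DMUB service:
         * subtract them from the size and skip the header when pointing at
         * the code itself.
         */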
1582         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1583                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1584         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1585         region_params.vbios_size = adev->bios_size;
1586         region_params.fw_bss_data = region_params.bss_data_size ?
1587                 adev->dm.dmub_fw->data +
1588                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1589                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1590         region_params.fw_inst_const =
1591                 adev->dm.dmub_fw->data +
1592                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1593                 PSP_HEADER_BYTES;
1594
1595         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1596                                            &region_info);
1597
1598         if (status != DMUB_STATUS_OK) {
1599                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1600                 return -EINVAL;
1601         }
1602
1603         /*
1604          * Allocate a framebuffer based on the total size of all the regions.
1605          * TODO: Move this into GART.
1606          */
1607         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1608                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1609                                     &adev->dm.dmub_bo_gpu_addr,
1610                                     &adev->dm.dmub_bo_cpu_addr);
1611         if (r)
1612                 return r;
1613
1614         /* Rebase the regions on the framebuffer address. */
1615         memset(&fb_params, 0, sizeof(fb_params));
1616         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1617         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1618         fb_params.region_info = &region_info;
1619
1620         adev->dm.dmub_fb_info =
1621                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1622         fb_info = adev->dm.dmub_fb_info;
1623
1624         if (!fb_info) {
1625                 DRM_ERROR(
1626                         "Failed to allocate framebuffer info for DMUB service!\n");
1627                 return -ENOMEM;
1628         }
1629
1630         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1631         if (status != DMUB_STATUS_OK) {
1632                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1633                 return -EINVAL;
1634         }
1635
1636         return 0;
1637 }
1638
1639 static int dm_sw_init(void *handle)
1640 {
1641         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1642         int r;
1643
1644         r = dm_dmub_sw_init(adev);
1645         if (r)
1646                 return r;
1647
1648         return load_dmcu_fw(adev);
1649 }
1650
1651 static int dm_sw_fini(void *handle)
1652 {
1653         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654
1655         kfree(adev->dm.dmub_fb_info);
1656         adev->dm.dmub_fb_info = NULL;
1657
1658         if (adev->dm.dmub_srv) {
1659                 dmub_srv_destroy(adev->dm.dmub_srv);
1660                 adev->dm.dmub_srv = NULL;
1661         }
1662
1663         release_firmware(adev->dm.dmub_fw);
1664         adev->dm.dmub_fw = NULL;
1665
1666         release_firmware(adev->dm.fw_dmcu);
1667         adev->dm.fw_dmcu = NULL;
1668
1669         return 0;
1670 }
1671
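/* Walk all connectors and (re)start MST topology management on every MST
 * branch link that has an AUX channel. If MST cannot be started, fall back
 * to treating the link as a single (non-MST) connection.
 */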
1672 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1673 {
1674         struct amdgpu_dm_connector *aconnector;
1675         struct drm_connector *connector;
1676         struct drm_connector_list_iter iter;
1677         int ret = 0;
1678
1679         drm_connector_list_iter_begin(dev, &iter);
1680         drm_for_each_connector_iter(connector, &iter) {
1681                 aconnector = to_amdgpu_dm_connector(connector);
1682                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1683                     aconnector->mst_mgr.aux) {
1684                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1685                                          aconnector,
1686                                          aconnector->base.base.id);
1687
1688                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1689                         if (ret < 0) {
1690                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1691                                 aconnector->dc_link->type =
1692                                         dc_connection_single;
1693                                 break;
1694                         }
1695                 }
1696         }
1697         drm_connector_list_iter_end(&iter);
1698
1699         return ret;
1700 }
1701
1702 static int dm_late_init(void *handle)
1703 {
1704         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1705
1706         struct dmcu_iram_parameters params;
1707         unsigned int linear_lut[16];
1708         int i;
1709         struct dmcu *dmcu = NULL;
1710
1711         dmcu = adev->dm.dc->res_pool->dmcu;
1712
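        /* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */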
1713         for (i = 0; i < 16; i++)
1714                 linear_lut[i] = 0xFFFF * i / 15;
1715
1716         params.set = 0;
1717         params.backlight_ramping_start = 0xCCCC;
1718         params.backlight_ramping_reduction = 0xCCCCCCCC;
1719         params.backlight_lut_array_size = 16;
1720         params.backlight_lut_array = linear_lut;
1721
1722         /* Min backlight level after ABM reduction; don't allow below 1%:
1723          * 0xFFFF x 0.01 = 0x28F
1724          */
1725         params.min_abm_backlight = 0x28F;
1726         /* In the case where abm is implemented on dmcub,
1727          * the dmcu object will be null.
1728          * ABM 2.4 and up are implemented on dmcub.
1729          */
1730         if (dmcu) {
1731                 if (!dmcu_load_iram(dmcu, params))
1732                         return -EINVAL;
1733         } else if (adev->dm.dc->ctx->dmub_srv) {
1734                 struct dc_link *edp_links[MAX_NUM_EDP];
1735                 int edp_num;
1736
1737                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1738                 for (i = 0; i < edp_num; i++) {
1739                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1740                                 return -EINVAL;
1741                 }
1742         }
1743
1744         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1745 }
1746
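/* Suspend or resume the MST topology managers of all root MST connectors
 * around S3. If a manager fails to resume, MST is torn down on that link and
 * a hotplug event is raised so userspace re-probes the topology.
 */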
1747 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1748 {
1749         struct amdgpu_dm_connector *aconnector;
1750         struct drm_connector *connector;
1751         struct drm_connector_list_iter iter;
1752         struct drm_dp_mst_topology_mgr *mgr;
1753         int ret;
1754         bool need_hotplug = false;
1755
1756         drm_connector_list_iter_begin(dev, &iter);
1757         drm_for_each_connector_iter(connector, &iter) {
1758                 aconnector = to_amdgpu_dm_connector(connector);
1759                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1760                     aconnector->mst_port)
1761                         continue;
1762
1763                 mgr = &aconnector->mst_mgr;
1764
1765                 if (suspend) {
1766                         drm_dp_mst_topology_mgr_suspend(mgr);
1767                 } else {
1768                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1769                         if (ret < 0) {
1770                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1771                                 need_hotplug = true;
1772                         }
1773                 }
1774         }
1775         drm_connector_list_iter_end(&iter);
1776
1777         if (need_hotplug)
1778                 drm_kms_helper_hotplug_event(dev);
1779 }
1780
1781 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1782 {
1783         struct smu_context *smu = &adev->smu;
1784         int ret = 0;
1785
1786         if (!is_support_sw_smu(adev))
1787                 return 0;
1788
1789         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1790          * depends on the Windows driver's dc implementation.
1791          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1792          * settings should be passed to smu during boot up and on resume from s3.
1793          * Boot up: dc calculates the dcn watermark clock settings within
1794          * dc_create (dcn20_resource_construct), then calls the pplib functions
1795          * below to pass the settings to smu:
1796          * smu_set_watermarks_for_clock_ranges
1797          * smu_set_watermarks_table
1798          * navi10_set_watermarks_table
1799          * smu_write_watermarks_table
1800          *
1801          * For Renoir, the clock settings of the dcn watermarks are also fixed
1802          * values. dc has implemented a different flow for the Windows driver:
1803          * dc_hardware_init / dc_set_power_state
1804          * dcn10_init_hw
1805          * notify_wm_ranges
1806          * set_wm_ranges
1807          * -- Linux
1808          * smu_set_watermarks_for_clock_ranges
1809          * renoir_set_watermarks_table
1810          * smu_write_watermarks_table
1811          *
1812          * For Linux,
1813          * dc_hardware_init -> amdgpu_dm_init
1814          * dc_set_power_state --> dm_resume
1815          *
1816          * Therefore, this function applies to navi10/12/14 but not to Renoir.
1817          */
1819         switch (adev->asic_type) {
1820         case CHIP_NAVI10:
1821         case CHIP_NAVI14:
1822         case CHIP_NAVI12:
1823                 break;
1824         default:
1825                 return 0;
1826         }
1827
1828         ret = smu_write_watermarks_table(smu);
1829         if (ret) {
1830                 DRM_ERROR("Failed to update WMTABLE!\n");
1831                 return ret;
1832         }
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * dm_hw_init() - Initialize DC device
1839  * @handle: The base driver device containing the amdgpu_dm device.
1840  *
1841  * Initialize the &struct amdgpu_display_manager device. This involves calling
1842  * the initializers of each DM component, then populating the struct with them.
1843  *
1844  * Although the function implies hardware initialization, both hardware and
1845  * software are initialized here. Splitting them out to their relevant init
1846  * hooks is a future TODO item.
1847  *
1848  * Some notable things that are initialized here:
1849  *
1850  * - Display Core, both software and hardware
1851  * - DC modules that we need (freesync and color management)
1852  * - DRM software states
1853  * - Interrupt sources and handlers
1854  * - Vblank support
1855  * - Debug FS entries, if enabled
1856  */
1857 static int dm_hw_init(void *handle)
1858 {
1859         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1860         /* Create DAL display manager */
1861         amdgpu_dm_init(adev);
1862         amdgpu_dm_hpd_init(adev);
1863
1864         return 0;
1865 }
1866
1867 /**
1868  * dm_hw_fini() - Teardown DC device
1869  * @handle: The base driver device containing the amdgpu_dm device.
1870  *
1871  * Teardown components within &struct amdgpu_display_manager that require
1872  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1873  * were loaded. Also flush IRQ workqueues and disable them.
1874  */
1875 static int dm_hw_fini(void *handle)
1876 {
1877         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1878
1879         amdgpu_dm_hpd_fini(adev);
1880
1881         amdgpu_dm_irq_fini(adev);
1882         amdgpu_dm_fini(adev);
1883         return 0;
1884 }
1885
1887 static int dm_enable_vblank(struct drm_crtc *crtc);
1888 static void dm_disable_vblank(struct drm_crtc *crtc);
1889
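/* Enable or disable the pageflip and vblank interrupts of every stream in
 * @state that still has planes attached; used to quiesce and later restore
 * display interrupts around a GPU reset.
 */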
1890 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1891                                  struct dc_state *state, bool enable)
1892 {
1893         enum dc_irq_source irq_source;
1894         struct amdgpu_crtc *acrtc;
1895         int rc = -EBUSY;
1896         int i = 0;
1897
1898         for (i = 0; i < state->stream_count; i++) {
1899                 acrtc = get_crtc_by_otg_inst(
1900                                 adev, state->stream_status[i].primary_otg_inst);
1901
1902                 if (acrtc && state->stream_status[i].plane_count != 0) {
1903                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1904                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1905                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1906                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1907                         if (rc)
1908                                 DRM_WARN("Failed to %s pflip interrupts\n",
1909                                          enable ? "enable" : "disable");
1910
1911                         if (enable) {
1912                                 rc = dm_enable_vblank(&acrtc->base);
1913                                 if (rc)
1914                                         DRM_WARN("Failed to enable vblank interrupts\n");
1915                         } else {
1916                                 dm_disable_vblank(&acrtc->base);
1917                         }
1919                 }
1920         }
1922 }
1923
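/* Commit a copy of the current DC state with all streams (and their planes)
 * removed, leaving the hardware in a clean, zero-stream state for the
 * reset-path suspend.
 */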
1924 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1925 {
1926         struct dc_state *context = NULL;
1927         enum dc_status res = DC_ERROR_UNEXPECTED;
1928         int i;
1929         struct dc_stream_state *del_streams[MAX_PIPES];
1930         int del_streams_count = 0;
1931
1932         memset(del_streams, 0, sizeof(del_streams));
1933
1934         context = dc_create_state(dc);
1935         if (context == NULL)
1936                 goto context_alloc_fail;
1937
1938         dc_resource_state_copy_construct_current(dc, context);
1939
1940         /* First remove from context all streams */
1941         for (i = 0; i < context->stream_count; i++) {
1942                 struct dc_stream_state *stream = context->streams[i];
1943
1944                 del_streams[del_streams_count++] = stream;
1945         }
1946
1947         /* Remove all planes for removed streams and then remove the streams */
1948         for (i = 0; i < del_streams_count; i++) {
1949                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1950                         res = DC_FAIL_DETACH_SURFACES;
1951                         goto fail;
1952                 }
1953
1954                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1955                 if (res != DC_OK)
1956                         goto fail;
1957         }
1958
1959
1960         res = dc_validate_global_state(dc, context, false);
1961
1962         if (res != DC_OK) {
1963                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1964                 goto fail;
1965         }
1966
1967         res = dc_commit_state(dc, context);
1968
1969 fail:
1970         dc_release_state(context);
1971
1972 context_alloc_fail:
1973         return res;
1974 }
1975
1976 static int dm_suspend(void *handle)
1977 {
1978         struct amdgpu_device *adev = handle;
1979         struct amdgpu_display_manager *dm = &adev->dm;
1980         int ret = 0;
1981
1982         if (amdgpu_in_reset(adev)) {
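                /* Hold dc_lock across the whole GPU reset; the matching
                 * unlock happens in dm_resume() once the cached state has
                 * been committed back.
                 */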
1983                 mutex_lock(&dm->dc_lock);
1984
1985 #if defined(CONFIG_DRM_AMD_DC_DCN)
1986                 dc_allow_idle_optimizations(adev->dm.dc, false);
1987 #endif
1988
1989                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1990
1991                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1992
1993                 amdgpu_dm_commit_zero_streams(dm->dc);
1994
1995                 amdgpu_dm_irq_suspend(adev);
1996
1997                 return ret;
1998         }
1999
2000         WARN_ON(adev->dm.cached_state);
2001         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2002
2003         s3_handle_mst(adev_to_drm(adev), true);
2004
2005         amdgpu_dm_irq_suspend(adev);
2006
2007         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2008
2009         return 0;
2010 }
2011
2012 static struct amdgpu_dm_connector *
2013 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2014                                              struct drm_crtc *crtc)
2015 {
2016         uint32_t i;
2017         struct drm_connector_state *new_con_state;
2018         struct drm_connector *connector;
2019         struct drm_crtc *crtc_from_state;
2020
2021         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2022                 crtc_from_state = new_con_state->crtc;
2023
2024                 if (crtc_from_state == crtc)
2025                         return to_amdgpu_dm_connector(connector);
2026         }
2027
2028         return NULL;
2029 }
2030
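/* Fake a link detection for forced connectors: create a local sink with the
 * signal type matching the connector and read the EDID from it directly,
 * since no physical sink was detected.
 */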
2031 static void emulated_link_detect(struct dc_link *link)
2032 {
2033         struct dc_sink_init_data sink_init_data = { 0 };
2034         struct display_sink_capability sink_caps = { 0 };
2035         enum dc_edid_status edid_status;
2036         struct dc_context *dc_ctx = link->ctx;
2037         struct dc_sink *sink = NULL;
2038         struct dc_sink *prev_sink = NULL;
2039
2040         link->type = dc_connection_none;
2041         prev_sink = link->local_sink;
2042
2043         if (prev_sink)
2044                 dc_sink_release(prev_sink);
2045
2046         switch (link->connector_signal) {
2047         case SIGNAL_TYPE_HDMI_TYPE_A: {
2048                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2050                 break;
2051         }
2052
2053         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2054                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2056                 break;
2057         }
2058
2059         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2060                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2062                 break;
2063         }
2064
2065         case SIGNAL_TYPE_LVDS: {
2066                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2067                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2068                 break;
2069         }
2070
2071         case SIGNAL_TYPE_EDP: {
2072                 sink_caps.transaction_type =
2073                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2074                 sink_caps.signal = SIGNAL_TYPE_EDP;
2075                 break;
2076         }
2077
2078         case SIGNAL_TYPE_DISPLAY_PORT: {
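                /* Note that an emulated DP sink is reported with a virtual
                 * signal type rather than SIGNAL_TYPE_DISPLAY_PORT.
                 */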
2079                 sink_caps.transaction_type =
2080                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2081                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2082                 break;
2083         }
2084
2085         default:
2086                 DC_ERROR("Invalid connector type! signal:%d\n",
2087                         link->connector_signal);
2088                 return;
2089         }
2090
2091         sink_init_data.link = link;
2092         sink_init_data.sink_signal = sink_caps.signal;
2093
2094         sink = dc_sink_create(&sink_init_data);
2095         if (!sink) {
2096                 DC_ERROR("Failed to create sink!\n");
2097                 return;
2098         }
2099
2100         /* dc_sink_create returns a new reference */
2101         link->local_sink = sink;
2102
2103         edid_status = dm_helpers_read_local_edid(
2104                         link->ctx,
2105                         link,
2106                         sink);
2107
2108         if (edid_status != EDID_OK)
2109                 DC_ERROR("Failed to read EDID\n");
2111 }
2112
2113 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2114                                      struct amdgpu_display_manager *dm)
2115 {
2116         struct {
2117                 struct dc_surface_update surface_updates[MAX_SURFACES];
2118                 struct dc_plane_info plane_infos[MAX_SURFACES];
2119                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2120                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2121                 struct dc_stream_update stream_update;
2122         } *bundle;
2123         int k, m;
2124
2125         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2126
2127         if (!bundle) {
2128                 dm_error("Failed to allocate update bundle\n");
2129                 goto cleanup;
2130         }
2131
2132         for (k = 0; k < dc_state->stream_count; k++) {
2133                 bundle->stream_update.stream = dc_state->streams[k];
2134
2135                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2136                         bundle->surface_updates[m].surface =
2137                                 dc_state->stream_status[k].plane_states[m];
2138                         bundle->surface_updates[m].surface->force_full_update =
2139                                 true;
2140                 }
2141                 dc_commit_updates_for_stream(
2142                         dm->dc, bundle->surface_updates,
2143                         dc_state->stream_status[k].plane_count,
2144                         dc_state->streams[k], &bundle->stream_update, dc_state);
2145         }
2146
2147 cleanup:
2148         kfree(bundle);
2151 }
2152
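/* Find the stream currently driving @link and commit a dpms_off update for
 * it, powering the output down without a full modeset.
 */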
2153 static void dm_set_dpms_off(struct dc_link *link)
2154 {
2155         struct dc_stream_state *stream_state;
2156         struct amdgpu_dm_connector *aconnector = link->priv;
2157         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2158         struct dc_stream_update stream_update;
2159         bool dpms_off = true;
2160
2161         memset(&stream_update, 0, sizeof(stream_update));
2162         stream_update.dpms_off = &dpms_off;
2163
2164         mutex_lock(&adev->dm.dc_lock);
2165         stream_state = dc_stream_find_from_link(link);
2166
2167         if (stream_state == NULL) {
2168                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2169                 mutex_unlock(&adev->dm.dc_lock);
2170                 return;
2171         }
2172
2173         stream_update.stream = stream_state;
2174         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2175                                      stream_state, &stream_update,
2176                                      stream_state->ctx->dc->current_state);
2177         mutex_unlock(&adev->dm.dc_lock);
2178 }
2179
2180 static int dm_resume(void *handle)
2181 {
2182         struct amdgpu_device *adev = handle;
2183         struct drm_device *ddev = adev_to_drm(adev);
2184         struct amdgpu_display_manager *dm = &adev->dm;
2185         struct amdgpu_dm_connector *aconnector;
2186         struct drm_connector *connector;
2187         struct drm_connector_list_iter iter;
2188         struct drm_crtc *crtc;
2189         struct drm_crtc_state *new_crtc_state;
2190         struct dm_crtc_state *dm_new_crtc_state;
2191         struct drm_plane *plane;
2192         struct drm_plane_state *new_plane_state;
2193         struct dm_plane_state *dm_new_plane_state;
2194         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2195         enum dc_connection_type new_connection_type = dc_connection_none;
2196         struct dc_state *dc_state;
2197         int i, r, j;
2198
2199         if (amdgpu_in_reset(adev)) {
2200                 dc_state = dm->cached_dc_state;
2201
2202                 r = dm_dmub_hw_init(adev);
2203                 if (r)
2204                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2205
2206                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2207                 dc_resume(dm->dc);
2208
2209                 amdgpu_dm_irq_resume_early(adev);
2210
2211                 for (i = 0; i < dc_state->stream_count; i++) {
2212                         dc_state->streams[i]->mode_changed = true;
2213                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2214                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2215                                         = 0xffffffff;
2216                         }
2217                 }
2218
2219                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2220
2221                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2222
2223                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2224
2225                 dc_release_state(dm->cached_dc_state);
2226                 dm->cached_dc_state = NULL;
2227
2228                 amdgpu_dm_irq_resume_late(adev);
2229
2230                 mutex_unlock(&dm->dc_lock);
2231
2232                 return 0;
2233         }
2234         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2235         dc_release_state(dm_state->context);
2236         dm_state->context = dc_create_state(dm->dc);
2237         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2238         dc_resource_state_construct(dm->dc, dm_state->context);
2239
2240         /* Before powering on DC we need to re-initialize DMUB. */
2241         r = dm_dmub_hw_init(adev);
2242         if (r)
2243                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2244
2245         /* power on hardware */
2246         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2247
2248         /* program HPD filter */
2249         dc_resume(dm->dc);
2250
2251         /*
2252          * Early enable the HPD Rx IRQ; this should be done before the mode
2253          * set, as short-pulse interrupts are used for MST.
2254          */
2255         amdgpu_dm_irq_resume_early(adev);
2256
2257         /* On resume we need to rewrite the MSTM control bits to enable MST */
2258         s3_handle_mst(ddev, false);
2259
2260         /* Do detection */
2261         drm_connector_list_iter_begin(ddev, &iter);
2262         drm_for_each_connector_iter(connector, &iter) {
2263                 aconnector = to_amdgpu_dm_connector(connector);
2264
2265                 /*
2266                  * This is the case when traversing through already created
2267                  * MST connectors; they should be skipped.
2268                  */
2269                 if (aconnector->mst_port)
2270                         continue;
2271
2272                 mutex_lock(&aconnector->hpd_lock);
2273                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2274                         DRM_ERROR("KMS: Failed to detect connector\n");
2275
2276                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2277                         emulated_link_detect(aconnector->dc_link);
2278                 else
2279                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2280
2281                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2282                         aconnector->fake_enable = false;
2283
2284                 if (aconnector->dc_sink)
2285                         dc_sink_release(aconnector->dc_sink);
2286                 aconnector->dc_sink = NULL;
2287                 amdgpu_dm_update_connector_after_detect(aconnector);
2288                 mutex_unlock(&aconnector->hpd_lock);
2289         }
2290         drm_connector_list_iter_end(&iter);
2291
2292         /* Force mode set in atomic commit */
2293         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2294                 new_crtc_state->active_changed = true;
2295
2296         /*
2297          * atomic_check is expected to create the dc states. We need to release
2298          * them here, since they were duplicated as part of the suspend
2299          * procedure.
2300          */
2301         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2302                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2303                 if (dm_new_crtc_state->stream) {
2304                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2305                         dc_stream_release(dm_new_crtc_state->stream);
2306                         dm_new_crtc_state->stream = NULL;
2307                 }
2308         }
2309
2310         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2311                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2312                 if (dm_new_plane_state->dc_state) {
2313                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2314                         dc_plane_state_release(dm_new_plane_state->dc_state);
2315                         dm_new_plane_state->dc_state = NULL;
2316                 }
2317         }
2318
2319         drm_atomic_helper_resume(ddev, dm->cached_state);
2320
2321         dm->cached_state = NULL;
2322
2323         amdgpu_dm_irq_resume_late(adev);
2324
2325         amdgpu_dm_smu_write_watermarks_table(adev);
2326
2327         return 0;
2328 }
2329
2330 /**
2331  * DOC: DM Lifecycle
2332  *
2333  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2334  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2335  * the base driver's device list to be initialized and torn down accordingly.
2336  *
2337  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2338  */
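/* For reference, the base driver picks this block up roughly as follows
 * (a sketch only; the exact call sites live in the per-ASIC setup code,
 * e.g. soc15.c or nv.c):
 *
 *      if (amdgpu_device_has_dc_support(adev))
 *              amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */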
2339
2340 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2341         .name = "dm",
2342         .early_init = dm_early_init,
2343         .late_init = dm_late_init,
2344         .sw_init = dm_sw_init,
2345         .sw_fini = dm_sw_fini,
2346         .hw_init = dm_hw_init,
2347         .hw_fini = dm_hw_fini,
2348         .suspend = dm_suspend,
2349         .resume = dm_resume,
2350         .is_idle = dm_is_idle,
2351         .wait_for_idle = dm_wait_for_idle,
2352         .check_soft_reset = dm_check_soft_reset,
2353         .soft_reset = dm_soft_reset,
2354         .set_clockgating_state = dm_set_clockgating_state,
2355         .set_powergating_state = dm_set_powergating_state,
2356 };
2357
2358 const struct amdgpu_ip_block_version dm_ip_block =
2359 {
2360         .type = AMD_IP_BLOCK_TYPE_DCE,
2361         .major = 1,
2362         .minor = 0,
2363         .rev = 0,
2364         .funcs = &amdgpu_dm_funcs,
2365 };
2366
2368 /**
2369  * DOC: atomic
2370  *
2371  * *WIP*
2372  */
2373
2374 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2375         .fb_create = amdgpu_display_user_framebuffer_create,
2376         .get_format_info = amd_get_format_info,
2377         .output_poll_changed = drm_fb_helper_output_poll_changed,
2378         .atomic_check = amdgpu_dm_atomic_check,
2379         .atomic_commit = drm_atomic_helper_commit,
2380 };
2381
2382 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2383         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2384 };
2385
2386 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2387 {
2388         u32 max_cll, min_cll, max, min, q, r;
2389         struct amdgpu_dm_backlight_caps *caps;
2390         struct amdgpu_display_manager *dm;
2391         struct drm_connector *conn_base;
2392         struct amdgpu_device *adev;
2393         struct dc_link *link = NULL;
2394         static const u8 pre_computed_values[] = {
2395                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2396                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2397
2398         if (!aconnector || !aconnector->dc_link)
2399                 return;
2400
2401         link = aconnector->dc_link;
2402         if (link->connector_signal != SIGNAL_TYPE_EDP)
2403                 return;
2404
2405         conn_base = &aconnector->base;
2406         adev = drm_to_adev(conn_base->dev);
2407         dm = &adev->dm;
2408         caps = &dm->backlight_caps;
2409         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2410         caps->aux_support = false;
2411         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2412         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2413
2414         if (caps->ext_caps->bits.oled == 1 ||
2415             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2416             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2417                 caps->aux_support = true;
2418
2419         if (amdgpu_backlight == 0)
2420                 caps->aux_support = false;
2421         else if (amdgpu_backlight == 1)
2422                 caps->aux_support = true;
2423
2424         /* From the specification (CTA-861-G), to calculate the maximum
2425          * luminance we need to use:
2426          *      Luminance = 50*2**(CV/32)
2427          * where CV is a one-byte value.
2428          * Evaluating this expression directly would need floating-point
2429          * precision; to avoid that complexity, we take advantage of the fact
2430          * that CV is divided by a constant. By Euclid's division algorithm,
2431          * CV can be written as CV = 32*q + r. Substituting this into the
2432          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2433          * to pre-compute the value of 50*2**(r/32) for each r. The values
2434          * were pre-computed with the following Ruby line:
2435          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2436          * The results of the above expression can be verified against
2437          * pre_computed_values.
2438          */
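        /* Worked example (hypothetical sink): max_cll = 65 gives q = 2 and
         * r = 1, so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
         * which matches 50*2**(65/32) ~= 204.4 from the formula above.
         */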
2439         q = max_cll >> 5;
2440         r = max_cll % 32;
2441         max = (1 << q) * pre_computed_values[r];
2442
2443         /* min luminance: maxLum * (CV/255)^2 / 100 */
2444         q = DIV_ROUND_CLOSEST(min_cll, 255);
2445         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2446
2447         caps->aux_max_input_signal = max;
2448         caps->aux_min_input_signal = min;
2449 }
2450
2451 void amdgpu_dm_update_connector_after_detect(
2452                 struct amdgpu_dm_connector *aconnector)
2453 {
2454         struct drm_connector *connector = &aconnector->base;
2455         struct drm_device *dev = connector->dev;
2456         struct dc_sink *sink;
2457
2458         /* MST handled by drm_mst framework */
2459         if (aconnector->mst_mgr.mst_state)
2460                 return;
2461
2462         sink = aconnector->dc_link->local_sink;
2463         if (sink)
2464                 dc_sink_retain(sink);
2465
2466         /*
2467          * An EDID-managed connector gets its first update only in the mode_valid
2468          * hook; the connector sink is then set to fake or physical sink, depending on link status.
2469          * Skip if already done during boot.
2470          */
2471         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2472                         && aconnector->dc_em_sink) {
2473
2474                 /*
2475                  * For S3 resume with a headless setup, use the em_sink to fake
2476                  * the stream, because on resume connector->sink is set to NULL.
2477                  */
2478                 mutex_lock(&dev->mode_config.mutex);
2479
2480                 if (sink) {
2481                         if (aconnector->dc_sink) {
2482                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2483                                 /*
2484                                  * The retain and release below bump up the
2485                                  * refcount for the sink because the link no longer points to
2486                                  * it after disconnect; otherwise, on the next crtc-to-connector
2487                                  * reshuffle by UMD we would get an unwanted dc_sink release.
2488                                  */
2489                                 dc_sink_release(aconnector->dc_sink);
2490                         }
2491                         aconnector->dc_sink = sink;
2492                         dc_sink_retain(aconnector->dc_sink);
2493                         amdgpu_dm_update_freesync_caps(connector,
2494                                         aconnector->edid);
2495                 } else {
2496                         amdgpu_dm_update_freesync_caps(connector, NULL);
2497                         if (!aconnector->dc_sink) {
2498                                 aconnector->dc_sink = aconnector->dc_em_sink;
2499                                 dc_sink_retain(aconnector->dc_sink);
2500                         }
2501                 }
2502
2503                 mutex_unlock(&dev->mode_config.mutex);
2504
2505                 if (sink)
2506                         dc_sink_release(sink);
2507                 return;
2508         }
2509
2510         /*
2511          * TODO: temporary guard to look for proper fix
2512          * if this sink is MST sink, we should not do anything
2513          */
2514         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2515                 dc_sink_release(sink);
2516                 return;
2517         }
2518
2519         if (aconnector->dc_sink == sink) {
2520                 /*
2521                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2522                  * Do nothing!!
2523                  */
2524                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2525                                 aconnector->connector_id);
2526                 if (sink)
2527                         dc_sink_release(sink);
2528                 return;
2529         }
2530
2531         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2532                 aconnector->connector_id, aconnector->dc_sink, sink);
2533
2534         mutex_lock(&dev->mode_config.mutex);
2535
2536         /*
2537          * 1. Update status of the drm connector
2538          * 2. Send an event and let userspace tell us what to do
2539          */
2540         if (sink) {
2541                 /*
2542                  * TODO: check if we still need the S3 mode update workaround.
2543                  * If yes, put it here.
2544                  */
2545                 if (aconnector->dc_sink) {
2546                         amdgpu_dm_update_freesync_caps(connector, NULL);
2547                         dc_sink_release(aconnector->dc_sink);
2548                 }
2549
2550                 aconnector->dc_sink = sink;
2551                 dc_sink_retain(aconnector->dc_sink);
2552                 if (sink->dc_edid.length == 0) {
2553                         aconnector->edid = NULL;
2554                         if (aconnector->dc_link->aux_mode) {
2555                                 drm_dp_cec_unset_edid(
2556                                         &aconnector->dm_dp_aux.aux);
2557                         }
2558                 } else {
2559                         aconnector->edid =
2560                                 (struct edid *)sink->dc_edid.raw_edid;
2561
2562                         drm_connector_update_edid_property(connector,
2563                                                            aconnector->edid);
2564                         if (aconnector->dc_link->aux_mode)
2565                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2566                                                     aconnector->edid);
2567                 }
2568
2569                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2570                 update_connector_ext_caps(aconnector);
2571         } else {
2572                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2573                 amdgpu_dm_update_freesync_caps(connector, NULL);
2574                 drm_connector_update_edid_property(connector, NULL);
2575                 aconnector->num_modes = 0;
2576                 dc_sink_release(aconnector->dc_sink);
2577                 aconnector->dc_sink = NULL;
2578                 aconnector->edid = NULL;
2579 #ifdef CONFIG_DRM_AMD_DC_HDCP
2580                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2581                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2582                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2583 #endif
2584         }
2585
2586         mutex_unlock(&dev->mode_config.mutex);
2587
2588         update_subconnector_property(aconnector);
2589
2590         if (sink)
2591                 dc_sink_release(sink);
2592 }
2593
2594 static void handle_hpd_irq(void *param)
2595 {
2596         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2597         struct drm_connector *connector = &aconnector->base;
2598         struct drm_device *dev = connector->dev;
2599         enum dc_connection_type new_connection_type = dc_connection_none;
2600         struct amdgpu_device *adev = drm_to_adev(dev);
2601 #ifdef CONFIG_DRM_AMD_DC_HDCP
2602         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2603 #endif
2604
2605         if (adev->dm.disable_hpd_irq)
2606                 return;
2607
2608         /*
2609          * In case of failure, or for MST, there is no need to update the connector
2610          * status or notify the OS, since (in the MST case) MST does this in its own context.
2611          */
2612         mutex_lock(&aconnector->hpd_lock);
2613
2614 #ifdef CONFIG_DRM_AMD_DC_HDCP
2615         if (adev->dm.hdcp_workqueue) {
2616                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2617                 dm_con_state->update_hdcp = true;
2618         }
2619 #endif
2620         if (aconnector->fake_enable)
2621                 aconnector->fake_enable = false;
2622
2623         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2624                 DRM_ERROR("KMS: Failed to detect connector\n");
2625
2626         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2627                 emulated_link_detect(aconnector->dc_link);
2628
2630                 drm_modeset_lock_all(dev);
2631                 dm_restore_drm_connector_state(dev, connector);
2632                 drm_modeset_unlock_all(dev);
2633
2634                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2635                         drm_kms_helper_hotplug_event(dev);
2636
2637         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2638                 if (new_connection_type == dc_connection_none &&
2639                     aconnector->dc_link->type == dc_connection_none)
2640                         dm_set_dpms_off(aconnector->dc_link);
2641
2642                 amdgpu_dm_update_connector_after_detect(aconnector);
2643
2644                 drm_modeset_lock_all(dev);
2645                 dm_restore_drm_connector_state(dev, connector);
2646                 drm_modeset_unlock_all(dev);
2647
2648                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2649                         drm_kms_helper_hotplug_event(dev);
2650         }
2651         mutex_unlock(&aconnector->hpd_lock);
2653 }
2654
2655 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2656 {
2657         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2658         uint8_t dret;
2659         bool new_irq_handled = false;
2660         int dpcd_addr;
2661         int dpcd_bytes_to_read;
2662
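        /* Bound the ESI processing loop: each pass reads a batch of event
         * status indications, lets the MST manager handle them, and ACKs
         * them back to the branch device.
         */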
2663         const int max_process_count = 30;
2664         int process_count = 0;
2665
2666         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2667
2668         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2669                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2670                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2671                 dpcd_addr = DP_SINK_COUNT;
2672         } else {
2673                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2674                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2675                 dpcd_addr = DP_SINK_COUNT_ESI;
2676         }
2677
2678         dret = drm_dp_dpcd_read(
2679                 &aconnector->dm_dp_aux.aux,
2680                 dpcd_addr,
2681                 esi,
2682                 dpcd_bytes_to_read);
2683
2684         while (dret == dpcd_bytes_to_read &&
2685                 process_count < max_process_count) {
2686                 uint8_t retry;
2687                 dret = 0;
2688
2689                 process_count++;
2690
2691                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2692                 /* handle HPD short pulse irq */
2693                 if (aconnector->mst_mgr.mst_state)
2694                         drm_dp_mst_hpd_irq(
2695                                 &aconnector->mst_mgr,
2696                                 esi,
2697                                 &new_irq_handled);
2698
2699                 if (new_irq_handled) {
2700                         /* ACK at DPCD to notify downstream */
2701                         const int ack_dpcd_bytes_to_write =
2702                                 dpcd_bytes_to_read - 1;
2703
2704                         for (retry = 0; retry < 3; retry++) {
2705                                 uint8_t wret;
2706
2707                                 wret = drm_dp_dpcd_write(
2708                                         &aconnector->dm_dp_aux.aux,
2709                                         dpcd_addr + 1,
2710                                         &esi[1],
2711                                         ack_dpcd_bytes_to_write);
2712                                 if (wret == ack_dpcd_bytes_to_write)
2713                                         break;
2714                         }
2715
2716                         /* check if there is new irq to be handled */
2717                         dret = drm_dp_dpcd_read(
2718                                 &aconnector->dm_dp_aux.aux,
2719                                 dpcd_addr,
2720                                 esi,
2721                                 dpcd_bytes_to_read);
2722
2723                         new_irq_handled = false;
2724                 } else {
2725                         break;
2726                 }
2727         }
2728
2729         if (process_count == max_process_count)
2730                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2731 }
2732
2733 static void handle_hpd_rx_irq(void *param)
2734 {
2735         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2736         struct drm_connector *connector = &aconnector->base;
2737         struct drm_device *dev = connector->dev;
2738         struct dc_link *dc_link = aconnector->dc_link;
2739         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2740         bool result = false;
2741         enum dc_connection_type new_connection_type = dc_connection_none;
2742         struct amdgpu_device *adev = drm_to_adev(dev);
2743         union hpd_irq_data hpd_irq_data;
2744         bool lock_flag = false;
2745
2746         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2747
2748         if (adev->dm.disable_hpd_irq)
2749                 return;
2750
2752         /*
2753          * TODO: Temporarily take the mutex to protect the HPD interrupt from
2754          * GPIO conflicts; once an i2c helper is implemented, this mutex
2755          * should be retired.
2756          */
2757         mutex_lock(&aconnector->hpd_lock);
2758
2759         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2760
2761         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2762                 (dc_link->type == dc_connection_mst_branch)) {
2763                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2764                         result = true;
2765                         dm_handle_hpd_rx_irq(aconnector);
2766                         goto out;
2767                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2768                         result = false;
2769                         dm_handle_hpd_rx_irq(aconnector);
2770                         goto out;
2771                 }
2772         }
2773
2774         /*
2775          * TODO: We need the lock to avoid touching DC state while it's being
2776          * modified during automated compliance testing, or when link loss
2777          * happens. While this should be split into subhandlers and proper
2778          * interfaces to avoid having to conditionally lock like this in the
2779          * outer layer, we need this workaround temporarily to allow MST
2780          * lightup in some scenarios to avoid timeout.
2781          */
2782         if (!amdgpu_in_reset(adev) &&
2783             (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2784              hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2785                 mutex_lock(&adev->dm.dc_lock);
2786                 lock_flag = 1;
2787         }
2788
2789 #ifdef CONFIG_DRM_AMD_DC_HDCP
2790         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2791 #else
2792         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2793 #endif
2794         if (!amdgpu_in_reset(adev) && lock_flag)
2795                 mutex_unlock(&adev->dm.dc_lock);
2796
2797 out:
2798         if (result && !is_mst_root_connector) {
2799                 /* Downstream Port status changed. */
2800                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2801                         DRM_ERROR("KMS: Failed to detect connector\n");
2802
2803                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2804                         emulated_link_detect(dc_link);
2805
2806                         if (aconnector->fake_enable)
2807                                 aconnector->fake_enable = false;
2808
2809                         amdgpu_dm_update_connector_after_detect(aconnector);
2810
2812                         drm_modeset_lock_all(dev);
2813                         dm_restore_drm_connector_state(dev, connector);
2814                         drm_modeset_unlock_all(dev);
2815
2816                         drm_kms_helper_hotplug_event(dev);
2817                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2819                         if (aconnector->fake_enable)
2820                                 aconnector->fake_enable = false;
2821
2822                         amdgpu_dm_update_connector_after_detect(aconnector);
2823
2825                         drm_modeset_lock_all(dev);
2826                         dm_restore_drm_connector_state(dev, connector);
2827                         drm_modeset_unlock_all(dev);
2828
2829                         drm_kms_helper_hotplug_event(dev);
2830                 }
2831         }
2832 #ifdef CONFIG_DRM_AMD_DC_HDCP
2833         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2834                 if (adev->dm.hdcp_workqueue)
2835                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2836         }
2837 #endif
2838
2839         if (dc_link->type != dc_connection_mst_branch)
2840                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2841
2842         mutex_unlock(&aconnector->hpd_lock);
2843 }
2844
2845 static void register_hpd_handlers(struct amdgpu_device *adev)
2846 {
2847         struct drm_device *dev = adev_to_drm(adev);
2848         struct drm_connector *connector;
2849         struct amdgpu_dm_connector *aconnector;
2850         const struct dc_link *dc_link;
2851         struct dc_interrupt_params int_params = {0};
2852
2853         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2854         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2855
2856         list_for_each_entry(connector,
2857                         &dev->mode_config.connector_list, head) {
2858
2859                 aconnector = to_amdgpu_dm_connector(connector);
2860                 dc_link = aconnector->dc_link;
2861
2862                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2863                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2864                         int_params.irq_source = dc_link->irq_source_hpd;
2865
2866                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867                                         handle_hpd_irq,
2868                                         (void *) aconnector);
2869                 }
2870
2871                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2873                         /* Also register for DP short pulse (hpd_rx). */
2874                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2875                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2876
2877                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2878                                         handle_hpd_rx_irq,
2879                                         (void *) aconnector);
2880                 }
2881         }
2882 }
2883
2884 #if defined(CONFIG_DRM_AMD_DC_SI)
2885 /* Register IRQ sources and initialize IRQ callbacks */
2886 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2887 {
2888         struct dc *dc = adev->dm.dc;
2889         struct common_irq_params *c_irq_params;
2890         struct dc_interrupt_params int_params = {0};
2891         int r;
2892         int i;
2893         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2894
2895         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2896         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2897
2898         /*
2899          * Actions of amdgpu_irq_add_id():
2900          * 1. Register a set() function with base driver.
2901          *    Base driver will call set() function to enable/disable an
2902          *    interrupt in DC hardware.
2903          * 2. Register amdgpu_dm_irq_handler().
2904          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2905          *    coming from DC hardware.
2906          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2907          *    for acknowledging and handling. */
2908
2909         /* Use VBLANK interrupt */
2910         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2911                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2912                 if (r) {
2913                         DRM_ERROR("Failed to add crtc irq id!\n");
2914                         return r;
2915                 }
2916
2917                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918                 int_params.irq_source =
2919                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2920
2921                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2922
2923                 c_irq_params->adev = adev;
2924                 c_irq_params->irq_src = int_params.irq_source;
2925
2926                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2927                                 dm_crtc_high_irq, c_irq_params);
2928         }
2929
2930         /* Use GRPH_PFLIP interrupt */
2931         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2932                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2933                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2934                 if (r) {
2935                         DRM_ERROR("Failed to add page flip irq id!\n");
2936                         return r;
2937                 }
2938
2939                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2940                 int_params.irq_source =
2941                         dc_interrupt_to_irq_source(dc, i, 0);
2942
2943                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2944
2945                 c_irq_params->adev = adev;
2946                 c_irq_params->irq_src = int_params.irq_source;
2947
2948                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2949                                 dm_pflip_high_irq, c_irq_params);
2951         }
2952
2953         /* HPD */
2954         r = amdgpu_irq_add_id(adev, client_id,
2955                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2956         if (r) {
2957                 DRM_ERROR("Failed to add hpd irq id!\n");
2958                 return r;
2959         }
2960
2961         register_hpd_handlers(adev);
2962
2963         return 0;
2964 }
2965 #endif
2966
2967 /* Register IRQ sources and initialize IRQ callbacks */
2968 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2969 {
2970         struct dc *dc = adev->dm.dc;
2971         struct common_irq_params *c_irq_params;
2972         struct dc_interrupt_params int_params = {0};
2973         int r;
2974         int i;
2975         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2976
2977         if (adev->asic_type >= CHIP_VEGA10)
2978                 client_id = SOC15_IH_CLIENTID_DCE;
2979
2980         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2981         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2982
2983         /*
2984          * Actions of amdgpu_irq_add_id():
2985          * 1. Register a set() function with base driver.
2986          *    Base driver will call set() function to enable/disable an
2987          *    interrupt in DC hardware.
2988          * 2. Register amdgpu_dm_irq_handler().
2989          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2990          *    coming from DC hardware.
2991          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2992          *    for acknowledging and handling. */
2993
2994         /* Use VBLANK interrupt */
2995         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2996                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2997                 if (r) {
2998                         DRM_ERROR("Failed to add crtc irq id!\n");
2999                         return r;
3000                 }
3001
3002                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3003                 int_params.irq_source =
3004                         dc_interrupt_to_irq_source(dc, i, 0);
3005
3006                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3007
3008                 c_irq_params->adev = adev;
3009                 c_irq_params->irq_src = int_params.irq_source;
3010
3011                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3012                                 dm_crtc_high_irq, c_irq_params);
3013         }
3014
3015         /* Use VUPDATE interrupt */
3016         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3017                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3018                 if (r) {
3019                         DRM_ERROR("Failed to add vupdate irq id!\n");
3020                         return r;
3021                 }
3022
3023                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3024                 int_params.irq_source =
3025                         dc_interrupt_to_irq_source(dc, i, 0);
3026
3027                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3028
3029                 c_irq_params->adev = adev;
3030                 c_irq_params->irq_src = int_params.irq_source;
3031
3032                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3033                                 dm_vupdate_high_irq, c_irq_params);
3034         }
3035
3036         /* Use GRPH_PFLIP interrupt */
3037         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3038                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3039                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3040                 if (r) {
3041                         DRM_ERROR("Failed to add page flip irq id!\n");
3042                         return r;
3043                 }
3044
3045                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3046                 int_params.irq_source =
3047                         dc_interrupt_to_irq_source(dc, i, 0);
3048
3049                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3050
3051                 c_irq_params->adev = adev;
3052                 c_irq_params->irq_src = int_params.irq_source;
3053
3054                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3055                                 dm_pflip_high_irq, c_irq_params);
3057         }
3058
3059         /* HPD */
3060         r = amdgpu_irq_add_id(adev, client_id,
3061                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3062         if (r) {
3063                 DRM_ERROR("Failed to add hpd irq id!\n");
3064                 return r;
3065         }
3066
3067         register_hpd_handlers(adev);
3068
3069         return 0;
3070 }
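
/*
 * The dce60/dce110/dcn10 register functions above differ only in the
 * client id and source-id ranges; the per-source body is identical. A
 * sketch of that shared pattern, factored into a hypothetical helper
 * (not part of the driver) and compiled out:
 */
#if 0
static void register_dm_irq_sketch(struct amdgpu_device *adev, struct dc *dc,
                                   int srcid, struct common_irq_params *params,
                                   void (*handler)(void *))
{
        struct dc_interrupt_params int_params = {0};

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
        int_params.irq_source = dc_interrupt_to_irq_source(dc, srcid, 0);

        /* 'params' must be the dm.*_params slot matching irq_source. */
        params->adev = adev;
        params->irq_src = int_params.irq_source;

        amdgpu_dm_irq_register_interrupt(adev, &int_params, handler, params);
}
#endif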
3071
3072 #if defined(CONFIG_DRM_AMD_DC_DCN)
3073 /* Register IRQ sources and initialize IRQ callbacks */
3074 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3075 {
3076         struct dc *dc = adev->dm.dc;
3077         struct common_irq_params *c_irq_params;
3078         struct dc_interrupt_params int_params = {0};
3079         int r;
3080         int i;
3081 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3082         static const unsigned int vrtl_int_srcid[] = {
3083                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3084                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3085                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3086                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3087                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3088                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3089         };
3090 #endif
3091
3092         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3093         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3094
3095         /*
3096          * Actions of amdgpu_irq_add_id():
3097          * 1. Register a set() function with base driver.
3098          *    Base driver will call set() function to enable/disable an
3099          *    interrupt in DC hardware.
3100          * 2. Register amdgpu_dm_irq_handler().
3101          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3102          *    coming from DC hardware.
3103          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3104          *    for acknowledging and handling.
3105          */
3106
3107         /* Use VSTARTUP interrupt */
3108         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3109                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3110                         i++) {
3111                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3112
3113                 if (r) {
3114                         DRM_ERROR("Failed to add crtc irq id!\n");
3115                         return r;
3116                 }
3117
3118                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3119                 int_params.irq_source =
3120                         dc_interrupt_to_irq_source(dc, i, 0);
3121
3122                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3123
3124                 c_irq_params->adev = adev;
3125                 c_irq_params->irq_src = int_params.irq_source;
3126
3127                 amdgpu_dm_irq_register_interrupt(
3128                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3129         }
3130
3131         /* Use otg vertical line interrupt */
3132 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3133         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3134                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3135                                 vrtl_int_srcid[i], &adev->vline0_irq);
3136
3137                 if (r) {
3138                         DRM_ERROR("Failed to add vline0 irq id!\n");
3139                         return r;
3140                 }
3141
3142                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143                 int_params.irq_source =
3144                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3145
3146                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3147                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3148                         break;
3149                 }
3150
3151                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3152                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3153
3154                 c_irq_params->adev = adev;
3155                 c_irq_params->irq_src = int_params.irq_source;
3156
3157                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3158                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3159         }
3160 #endif
3161
3162         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3163          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3164          * to trigger at end of each vblank, regardless of state of the lock,
3165          * matching DCE behaviour.
3166          */
3167         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3168              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3169              i++) {
3170                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3171
3172                 if (r) {
3173                         DRM_ERROR("Failed to add vupdate irq id!\n");
3174                         return r;
3175                 }
3176
3177                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3178                 int_params.irq_source =
3179                         dc_interrupt_to_irq_source(dc, i, 0);
3180
3181                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3182
3183                 c_irq_params->adev = adev;
3184                 c_irq_params->irq_src = int_params.irq_source;
3185
3186                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187                                 dm_vupdate_high_irq, c_irq_params);
3188         }
3189
3190         /* Use GRPH_PFLIP interrupt */
3191         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3192                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3193                         i++) {
3194                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3195                 if (r) {
3196                         DRM_ERROR("Failed to add page flip irq id!\n");
3197                         return r;
3198                 }
3199
3200                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3201                 int_params.irq_source =
3202                         dc_interrupt_to_irq_source(dc, i, 0);
3203
3204                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3205
3206                 c_irq_params->adev = adev;
3207                 c_irq_params->irq_src = int_params.irq_source;
3208
3209                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3210                                 dm_pflip_high_irq, c_irq_params);
3212         }
3213
3214         /* HPD */
3215         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3216                         &adev->hpd_irq);
3217         if (r) {
3218                 DRM_ERROR("Failed to add hpd irq id!\n");
3219                 return r;
3220         }
3221
3222         register_hpd_handlers(adev);
3223
3224         return 0;
3225 }
3226 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3227 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3228 {
3229         struct dc *dc = adev->dm.dc;
3230         struct common_irq_params *c_irq_params;
3231         struct dc_interrupt_params int_params = {0};
3232         int r, i;
3233
3234         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3235         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3236
3237         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3238                         &adev->dmub_outbox_irq);
3239         if (r) {
3240                 DRM_ERROR("Failed to add outbox irq id!\n");
3241                 return r;
3242         }
3243
3244         if (dc->ctx->dmub_srv) {
3245                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3246                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3247                 int_params.irq_source =
3248                         dc_interrupt_to_irq_source(dc, i, 0);
3249
3250                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3251
3252                 c_irq_params->adev = adev;
3253                 c_irq_params->irq_src = int_params.irq_source;
3254
3255                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3256                                 dm_dmub_outbox1_low_irq, c_irq_params);
3257         }
3258
3259         return 0;
3260 }
3261 #endif
3262
3263 /*
3264  * Acquires the lock for the atomic state object and returns
3265  * the new atomic state.
3266  *
3267  * This should only be called during atomic check.
3268  */
3269 static int dm_atomic_get_state(struct drm_atomic_state *state,
3270                                struct dm_atomic_state **dm_state)
3271 {
3272         struct drm_device *dev = state->dev;
3273         struct amdgpu_device *adev = drm_to_adev(dev);
3274         struct amdgpu_display_manager *dm = &adev->dm;
3275         struct drm_private_state *priv_state;
3276
3277         if (*dm_state)
3278                 return 0;
3279
3280         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3281         if (IS_ERR(priv_state))
3282                 return PTR_ERR(priv_state);
3283
3284         *dm_state = to_dm_atomic_state(priv_state);
3285
3286         return 0;
3287 }
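
/*
 * Sketch of a typical caller during atomic check (the function name is
 * hypothetical, the block compiled out): the first call on a given
 * drm_atomic_state takes the private-object lock and duplicates the DM
 * state; later calls return the already-acquired state.
 */
#if 0
static int example_atomic_check_step(struct drm_atomic_state *state)
{
        struct dm_atomic_state *dm_state = NULL;
        int ret;

        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
                return ret;

        /* dm_state->context may now be inspected and modified. */
        return 0;
}
#endif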
3288
3289 static struct dm_atomic_state *
3290 dm_atomic_get_new_state(struct drm_atomic_state *state)
3291 {
3292         struct drm_device *dev = state->dev;
3293         struct amdgpu_device *adev = drm_to_adev(dev);
3294         struct amdgpu_display_manager *dm = &adev->dm;
3295         struct drm_private_obj *obj;
3296         struct drm_private_state *new_obj_state;
3297         int i;
3298
3299         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3300                 if (obj->funcs == dm->atomic_obj.funcs)
3301                         return to_dm_atomic_state(new_obj_state);
3302         }
3303
3304         return NULL;
3305 }
3306
3307 static struct drm_private_state *
3308 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3309 {
3310         struct dm_atomic_state *old_state, *new_state;
3311
3312         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3313         if (!new_state)
3314                 return NULL;
3315
3316         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3317
3318         old_state = to_dm_atomic_state(obj->state);
3319
3320         if (old_state && old_state->context)
3321                 new_state->context = dc_copy_state(old_state->context);
3322
3323         if (!new_state->context) {
3324                 kfree(new_state);
3325                 return NULL;
3326         }
3327
3328         return &new_state->base;
3329 }
3330
3331 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3332                                     struct drm_private_state *state)
3333 {
3334         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3335
3336         if (dm_state && dm_state->context)
3337                 dc_release_state(dm_state->context);
3338
3339         kfree(dm_state);
3340 }
3341
3342 static const struct drm_private_state_funcs dm_atomic_state_funcs = {
3343         .atomic_duplicate_state = dm_atomic_duplicate_state,
3344         .atomic_destroy_state = dm_atomic_destroy_state,
3345 };
3346
3347 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3348 {
3349         struct dm_atomic_state *state;
3350         int r;
3351
3352         adev->mode_info.mode_config_initialized = true;
3353
3354         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3355         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3356
3357         adev_to_drm(adev)->mode_config.max_width = 16384;
3358         adev_to_drm(adev)->mode_config.max_height = 16384;
3359
3360         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3361         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3362         /* indicates support for immediate flip */
3363         adev_to_drm(adev)->mode_config.async_page_flip = true;
3364
3365         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3366
3367         state = kzalloc(sizeof(*state), GFP_KERNEL);
3368         if (!state)
3369                 return -ENOMEM;
3370
3371         state->context = dc_create_state(adev->dm.dc);
3372         if (!state->context) {
3373                 kfree(state);
3374                 return -ENOMEM;
3375         }
3376
3377         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3378
3379         drm_atomic_private_obj_init(adev_to_drm(adev),
3380                                     &adev->dm.atomic_obj,
3381                                     &state->base,
3382                                     &dm_atomic_state_funcs);
3383
3384         r = amdgpu_display_modeset_create_props(adev);
3385         if (r) {
3386                 dc_release_state(state->context);
3387                 kfree(state);
3388                 return r;
3389         }
3390
3391         r = amdgpu_dm_audio_init(adev);
3392         if (r) {
3393                 dc_release_state(state->context);
3394                 kfree(state);
3395                 return r;
3396         }
3397
3398         return 0;
3399 }
3400
3401 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3402 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3403 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3404
3405 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3406         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3407
3408 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3409 {
3410 #if defined(CONFIG_ACPI)
3411         struct amdgpu_dm_backlight_caps caps;
3412
3413         memset(&caps, 0, sizeof(caps));
3414
3415         if (dm->backlight_caps.caps_valid)
3416                 return;
3417
3418         amdgpu_acpi_get_backlight_caps(&caps);
3419         if (caps.caps_valid) {
3420                 dm->backlight_caps.caps_valid = true;
3421                 if (caps.aux_support)
3422                         return;
3423                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3424                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3425         } else {
3426                 dm->backlight_caps.min_input_signal =
3427                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3428                 dm->backlight_caps.max_input_signal =
3429                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3430         }
3431 #else
3432         if (dm->backlight_caps.aux_support)
3433                 return;
3434
3435         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3436         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3437 #endif
3438 }
3439
3440 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3441                                 unsigned int *min, unsigned int *max)
3442 {
3443         if (!caps)
3444                 return 0;
3445
3446         if (caps->aux_support) {
3447                 // Firmware limits are in nits, DC API wants millinits.
3448                 *max = 1000 * caps->aux_max_input_signal;
3449                 *min = 1000 * caps->aux_min_input_signal;
3450         } else {
3451                 // Firmware limits are 8-bit, PWM control is 16-bit.
3452                 *max = 0x101 * caps->max_input_signal;
3453                 *min = 0x101 * caps->min_input_signal;
3454         }
3455         return 1;
3456 }
3457
3458 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3459                                         uint32_t brightness)
3460 {
3461         unsigned int min, max;
3462
3463         if (!get_brightness_range(caps, &min, &max))
3464                 return brightness;
3465
3466         // Rescale 0..255 to min..max
3467         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3468                                        AMDGPU_MAX_BL_LEVEL);
3469 }
3470
3471 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3472                                       uint32_t brightness)
3473 {
3474         unsigned int min, max;
3475
3476         if (!get_brightness_range(caps, &min, &max))
3477                 return brightness;
3478
3479         if (brightness < min)
3480                 return 0;
3481         // Rescale min..max to 0..255
3482         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3483                                  max - min);
3484 }
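
/*
 * Worked example of the two conversions above for the PWM (non-AUX)
 * path with the default 12..255 firmware range: min = 0x101 * 12 = 3084,
 * max = 0x101 * 255 = 65535 (0x101 replicates the 8-bit limit into both
 * bytes of the 16-bit PWM word). The sketch below is hypothetical and
 * compiled out.
 */
#if 0
static void backlight_conversion_example(void)
{
        struct amdgpu_dm_backlight_caps caps = {
                .min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT, /* 12  */
                .max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT, /* 255 */
        };
        u32 hw = convert_brightness_from_user(&caps, 128);  /* 34432 */
        u32 user = convert_brightness_to_user(&caps, hw);   /* 128 again */
}
#endif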
3485
3486 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3487                                          u32 user_brightness)
3488 {
3489         struct amdgpu_dm_backlight_caps caps;
3490         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3491         u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3492         bool rc = true;
3493         int i;
3494
3495         amdgpu_dm_update_backlight_caps(dm);
3496         caps = dm->backlight_caps;
3497
3498         for (i = 0; i < dm->num_of_edps; i++) {
3499                 dm->brightness[i] = user_brightness;
3500                 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3501                 link[i] = (struct dc_link *)dm->backlight_link[i];
3502         }
3503
3504         /* Change brightness based on AUX property */
3505         if (caps.aux_support) {
3506                 for (i = 0; i < dm->num_of_edps; i++) {
3507                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3508                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3509                         if (!rc) {
3510                                 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3511                                 break;
3512                         }
3513                 }
3514         } else {
3515                 for (i = 0; i < dm->num_of_edps; i++) {
3516                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3517                         if (!rc) {
3518                                 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3519                                 break;
3520                         }
3521                 }
3522         }
3523
3524         return rc ? 0 : 1;
3525 }
3526
3527 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3528 {
3529         struct amdgpu_display_manager *dm = bl_get_data(bd);
3530
3531         amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3532
3533         return 0;
3534 }
3535
3536 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3537 {
3538         struct amdgpu_dm_backlight_caps caps;
3539
3540         amdgpu_dm_update_backlight_caps(dm);
3541         caps = dm->backlight_caps;
3542
3543         if (caps.aux_support) {
3544                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3545                 u32 avg, peak;
3546                 bool rc;
3547
3548                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3549                 if (!rc)
3550                         return dm->brightness[0];
3551                 return convert_brightness_to_user(&caps, avg);
3552         } else {
3553                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3554
3555                 if (ret == DC_ERROR_UNEXPECTED)
3556                         return dm->brightness[0];
3557                 return convert_brightness_to_user(&caps, ret);
3558         }
3559 }
3560
3561 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3562 {
3563         struct amdgpu_display_manager *dm = bl_get_data(bd);
3564
3565         return amdgpu_dm_backlight_get_level(dm);
3566 }
3567
3568 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3569         .options = BL_CORE_SUSPENDRESUME,
3570         .get_brightness = amdgpu_dm_backlight_get_brightness,
3571         .update_status  = amdgpu_dm_backlight_update_status,
3572 };
3573
3574 static void
3575 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3576 {
3577         char bl_name[16];
3578         struct backlight_properties props = { 0 };
3579         int i;
3580
3581         amdgpu_dm_update_backlight_caps(dm);
3582         for (i = 0; i < dm->num_of_edps; i++)
3583                 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3584
3585         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3586         props.brightness = AMDGPU_MAX_BL_LEVEL;
3587         props.type = BACKLIGHT_RAW;
3588
3589         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3590                  adev_to_drm(dm->adev)->primary->index);
3591
3592         dm->backlight_dev = backlight_device_register(bl_name,
3593                                                       adev_to_drm(dm->adev)->dev,
3594                                                       dm,
3595                                                       &amdgpu_dm_backlight_ops,
3596                                                       &props);
3597
3598         if (IS_ERR(dm->backlight_dev))
3599                 DRM_ERROR("DM: Backlight registration failed!\n");
3600         else
3601                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3602 }
3603
3604 #endif
3605
3606 static int initialize_plane(struct amdgpu_display_manager *dm,
3607                             struct amdgpu_mode_info *mode_info, int plane_id,
3608                             enum drm_plane_type plane_type,
3609                             const struct dc_plane_cap *plane_cap)
3610 {
3611         struct drm_plane *plane;
3612         unsigned long possible_crtcs;
3613         int ret = 0;
3614
3615         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3616         if (!plane) {
3617                 DRM_ERROR("KMS: Failed to allocate plane\n");
3618                 return -ENOMEM;
3619         }
3620         plane->type = plane_type;
3621
3622         /*
3623          * HACK: IGT tests expect that the primary plane for a CRTC
3624          * can only have one possible CRTC. Only expose support for
3625          * any CRTC if the plane is not going to be used as a primary
3626          * plane for a CRTC, e.g. overlay or underlay planes.
3627          */
3628         possible_crtcs = 1 << plane_id;
3629         if (plane_id >= dm->dc->caps.max_streams)
3630                 possible_crtcs = 0xff;
3631
3632         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3633
3634         if (ret) {
3635                 DRM_ERROR("KMS: Failed to initialize plane\n");
3636                 kfree(plane);
3637                 return ret;
3638         }
3639
3640         if (mode_info)
3641                 mode_info->planes[plane_id] = plane;
3642
3643         return ret;
3644 }
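
/*
 * possible_crtcs is a bitmask of CRTC indexes the plane may attach to.
 * A sketch of the masks produced above for a hypothetical 4-CRTC part
 * (compiled out, values illustrative):
 */
#if 0
static void possible_crtcs_example(void)
{
        unsigned long primary_mask = 1ul << 2; /* primary plane 2: CRTC 2 only */
        unsigned long overlay_mask = 0xff;     /* extra planes: any CRTC */
}
#endif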
3645
3647 static void register_backlight_device(struct amdgpu_display_manager *dm,
3648                                       struct dc_link *link)
3649 {
3650 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3651         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3652
3653         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3654             link->type != dc_connection_none) {
3655                 /*
3656                  * Even if registration fails, we should continue with
3657                  * DM initialization because not having a backlight control
3658                  * is better than a black screen.
3659                  */
3660                 if (!dm->backlight_dev)
3661                         amdgpu_dm_register_backlight_device(dm);
3662
3663                 if (dm->backlight_dev) {
3664                         dm->backlight_link[dm->num_of_edps] = link;
3665                         dm->num_of_edps++;
3666                 }
3667         }
3668 #endif
3669 }
3670
3672 /*
3673  * In this architecture, the association
3674  * connector -> encoder -> crtc
3675  * is not really required. The crtc and connector will hold the
3676  * display_index as an abstraction to use with the DAL component.
3677  *
3678  * Returns 0 on success
3679  */
3680 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3681 {
3682         struct amdgpu_display_manager *dm = &adev->dm;
3683         int32_t i;
3684         struct amdgpu_dm_connector *aconnector = NULL;
3685         struct amdgpu_encoder *aencoder = NULL;
3686         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3687         uint32_t link_cnt;
3688         int32_t primary_planes;
3689         enum dc_connection_type new_connection_type = dc_connection_none;
3690         const struct dc_plane_cap *plane;
3691
3692         dm->display_indexes_num = dm->dc->caps.max_streams;
3693         /* Update the actual number of CRTCs in use */
3694         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3695
3696         link_cnt = dm->dc->caps.max_links;
3697         if (amdgpu_dm_mode_config_init(dm->adev)) {
3698                 DRM_ERROR("DM: Failed to initialize mode config\n");
3699                 return -EINVAL;
3700         }
3701
3702         /* There is one primary plane per CRTC */
3703         primary_planes = dm->dc->caps.max_streams;
3704         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3705
3706         /*
3707          * Initialize primary planes, implicit planes for legacy IOCTLS.
3708          * Order is reversed to match iteration order in atomic check.
3709          */
3710         for (i = (primary_planes - 1); i >= 0; i--) {
3711                 plane = &dm->dc->caps.planes[i];
3712
3713                 if (initialize_plane(dm, mode_info, i,
3714                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3715                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3716                         goto fail;
3717                 }
3718         }
3719
3720         /*
3721          * Initialize overlay planes, index starting after primary planes.
3722          * These planes have a higher DRM index than the primary planes since
3723          * they should be considered as having a higher z-order.
3724          * Order is reversed to match iteration order in atomic check.
3725          *
3726          * Only support DCN for now, and only expose one so we don't encourage
3727          * userspace to use up all the pipes.
3728          */
3729         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3730                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3731
3732                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3733                         continue;
3734
3735                 if (!plane->blends_with_above || !plane->blends_with_below)
3736                         continue;
3737
3738                 if (!plane->pixel_format_support.argb8888)
3739                         continue;
3740
3741                 if (initialize_plane(dm, NULL, primary_planes + i,
3742                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3743                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3744                         goto fail;
3745                 }
3746
3747                 /* Only create one overlay plane. */
3748                 break;
3749         }
3750
3751         for (i = 0; i < dm->dc->caps.max_streams; i++)
3752                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3753                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3754                         goto fail;
3755                 }
3756
3757 #if defined(CONFIG_DRM_AMD_DC_DCN)
3758         /* Use Outbox interrupt */
3759         switch (adev->asic_type) {
3760         case CHIP_SIENNA_CICHLID:
3761         case CHIP_NAVY_FLOUNDER:
3762         case CHIP_RENOIR:
3763                 if (register_outbox_irq_handlers(dm->adev)) {
3764                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3765                         goto fail;
3766                 }
3767                 break;
3768         default:
3769                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3770         }
3771 #endif
3772
3773         /* loops over all connectors on the board */
3774         for (i = 0; i < link_cnt; i++) {
3775                 struct dc_link *link = NULL;
3776
3777                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3778                         DRM_ERROR(
3779                                 "KMS: Cannot support more than %d display indexes\n",
3780                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3781                         continue;
3782                 }
3783
3784                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3785                 if (!aconnector)
3786                         goto fail;
3787
3788                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3789                 if (!aencoder)
3790                         goto fail;
3791
3792                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3793                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3794                         goto fail;
3795                 }
3796
3797                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3798                         DRM_ERROR("KMS: Failed to initialize connector\n");
3799                         goto fail;
3800                 }
3801
3802                 link = dc_get_link_at_index(dm->dc, i);
3803
3804                 if (!dc_link_detect_sink(link, &new_connection_type))
3805                         DRM_ERROR("KMS: Failed to detect connector\n");
3806
3807                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3808                         emulated_link_detect(link);
3809                         amdgpu_dm_update_connector_after_detect(aconnector);
3810
3811                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3812                         amdgpu_dm_update_connector_after_detect(aconnector);
3813                         register_backlight_device(dm, link);
3814                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3815                                 amdgpu_dm_set_psr_caps(link);
3816                 }
3817
3819         }
3820
3821         /* Software is initialized. Now we can register interrupt handlers. */
3822         switch (adev->asic_type) {
3823 #if defined(CONFIG_DRM_AMD_DC_SI)
3824         case CHIP_TAHITI:
3825         case CHIP_PITCAIRN:
3826         case CHIP_VERDE:
3827         case CHIP_OLAND:
3828                 if (dce60_register_irq_handlers(dm->adev)) {
3829                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3830                         goto fail;
3831                 }
3832                 break;
3833 #endif
3834         case CHIP_BONAIRE:
3835         case CHIP_HAWAII:
3836         case CHIP_KAVERI:
3837         case CHIP_KABINI:
3838         case CHIP_MULLINS:
3839         case CHIP_TONGA:
3840         case CHIP_FIJI:
3841         case CHIP_CARRIZO:
3842         case CHIP_STONEY:
3843         case CHIP_POLARIS11:
3844         case CHIP_POLARIS10:
3845         case CHIP_POLARIS12:
3846         case CHIP_VEGAM:
3847         case CHIP_VEGA10:
3848         case CHIP_VEGA12:
3849         case CHIP_VEGA20:
3850                 if (dce110_register_irq_handlers(dm->adev)) {
3851                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3852                         goto fail;
3853                 }
3854                 break;
3855 #if defined(CONFIG_DRM_AMD_DC_DCN)
3856         case CHIP_RAVEN:
3857         case CHIP_NAVI12:
3858         case CHIP_NAVI10:
3859         case CHIP_NAVI14:
3860         case CHIP_RENOIR:
3861         case CHIP_SIENNA_CICHLID:
3862         case CHIP_NAVY_FLOUNDER:
3863         case CHIP_DIMGREY_CAVEFISH:
3864         case CHIP_BEIGE_GOBY:
3865         case CHIP_VANGOGH:
3866                 if (dcn10_register_irq_handlers(dm->adev)) {
3867                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3868                         goto fail;
3869                 }
3870                 break;
3871 #endif
3872         default:
3873                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3874                 goto fail;
3875         }
3876
3877         return 0;
3878 fail:
3879         kfree(aencoder);
3880         kfree(aconnector);
3881
3882         return -EINVAL;
3883 }
3884
3885 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3886 {
3887         drm_mode_config_cleanup(dm->ddev);
3888         drm_atomic_private_obj_fini(&dm->atomic_obj);
3890 }
3891
3892 /******************************************************************************
3893  * amdgpu_display_funcs functions
3894  *****************************************************************************/
3895
3896 /*
3897  * dm_bandwidth_update - program display watermarks
3898  *
3899  * @adev: amdgpu_device pointer
3900  *
3901  * Calculate and program the display watermarks and line buffer allocation.
3902  */
3903 static void dm_bandwidth_update(struct amdgpu_device *adev)
3904 {
3905         /* TODO: implement later */
3906 }
3907
3908 static const struct amdgpu_display_funcs dm_display_funcs = {
3909         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3910         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3911         .backlight_set_level = NULL, /* never called for DC */
3912         .backlight_get_level = NULL, /* never called for DC */
3913         .hpd_sense = NULL,/* called unconditionally */
3914         .hpd_set_polarity = NULL, /* called unconditionally */
3915         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3916         .page_flip_get_scanoutpos =
3917                 dm_crtc_get_scanoutpos,/* called unconditionally */
3918         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3919         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3920 };
3921
3922 #if defined(CONFIG_DEBUG_KERNEL_DC)
3923
3924 static ssize_t s3_debug_store(struct device *device,
3925                               struct device_attribute *attr,
3926                               const char *buf,
3927                               size_t count)
3928 {
3929         int ret;
3930         int s3_state;
3931         struct drm_device *drm_dev = dev_get_drvdata(device);
3932         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3933
3934         ret = kstrtoint(buf, 0, &s3_state);
3935
3936         if (ret == 0) {
3937                 if (s3_state) {
3938                         dm_resume(adev);
3939                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3940                 } else
3941                         dm_suspend(adev);
3942         }
3943
3944         return ret == 0 ? count : 0;
3945 }
3946
3947 DEVICE_ATTR_WO(s3_debug);
3948
3949 #endif
3950
3951 static int dm_early_init(void *handle)
3952 {
3953         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3954
3955         switch (adev->asic_type) {
3956 #if defined(CONFIG_DRM_AMD_DC_SI)
3957         case CHIP_TAHITI:
3958         case CHIP_PITCAIRN:
3959         case CHIP_VERDE:
3960                 adev->mode_info.num_crtc = 6;
3961                 adev->mode_info.num_hpd = 6;
3962                 adev->mode_info.num_dig = 6;
3963                 break;
3964         case CHIP_OLAND:
3965                 adev->mode_info.num_crtc = 2;
3966                 adev->mode_info.num_hpd = 2;
3967                 adev->mode_info.num_dig = 2;
3968                 break;
3969 #endif
3970         case CHIP_BONAIRE:
3971         case CHIP_HAWAII:
3972                 adev->mode_info.num_crtc = 6;
3973                 adev->mode_info.num_hpd = 6;
3974                 adev->mode_info.num_dig = 6;
3975                 break;
3976         case CHIP_KAVERI:
3977                 adev->mode_info.num_crtc = 4;
3978                 adev->mode_info.num_hpd = 6;
3979                 adev->mode_info.num_dig = 7;
3980                 break;
3981         case CHIP_KABINI:
3982         case CHIP_MULLINS:
3983                 adev->mode_info.num_crtc = 2;
3984                 adev->mode_info.num_hpd = 6;
3985                 adev->mode_info.num_dig = 6;
3986                 break;
3987         case CHIP_FIJI:
3988         case CHIP_TONGA:
3989                 adev->mode_info.num_crtc = 6;
3990                 adev->mode_info.num_hpd = 6;
3991                 adev->mode_info.num_dig = 7;
3992                 break;
3993         case CHIP_CARRIZO:
3994                 adev->mode_info.num_crtc = 3;
3995                 adev->mode_info.num_hpd = 6;
3996                 adev->mode_info.num_dig = 9;
3997                 break;
3998         case CHIP_STONEY:
3999                 adev->mode_info.num_crtc = 2;
4000                 adev->mode_info.num_hpd = 6;
4001                 adev->mode_info.num_dig = 9;
4002                 break;
4003         case CHIP_POLARIS11:
4004         case CHIP_POLARIS12:
4005                 adev->mode_info.num_crtc = 5;
4006                 adev->mode_info.num_hpd = 5;
4007                 adev->mode_info.num_dig = 5;
4008                 break;
4009         case CHIP_POLARIS10:
4010         case CHIP_VEGAM:
4011                 adev->mode_info.num_crtc = 6;
4012                 adev->mode_info.num_hpd = 6;
4013                 adev->mode_info.num_dig = 6;
4014                 break;
4015         case CHIP_VEGA10:
4016         case CHIP_VEGA12:
4017         case CHIP_VEGA20:
4018                 adev->mode_info.num_crtc = 6;
4019                 adev->mode_info.num_hpd = 6;
4020                 adev->mode_info.num_dig = 6;
4021                 break;
4022 #if defined(CONFIG_DRM_AMD_DC_DCN)
4023         case CHIP_RAVEN:
4024         case CHIP_RENOIR:
4025         case CHIP_VANGOGH:
4026                 adev->mode_info.num_crtc = 4;
4027                 adev->mode_info.num_hpd = 4;
4028                 adev->mode_info.num_dig = 4;
4029                 break;
4030         case CHIP_NAVI10:
4031         case CHIP_NAVI12:
4032         case CHIP_SIENNA_CICHLID:
4033         case CHIP_NAVY_FLOUNDER:
4034                 adev->mode_info.num_crtc = 6;
4035                 adev->mode_info.num_hpd = 6;
4036                 adev->mode_info.num_dig = 6;
4037                 break;
4038         case CHIP_NAVI14:
4039         case CHIP_DIMGREY_CAVEFISH:
4040                 adev->mode_info.num_crtc = 5;
4041                 adev->mode_info.num_hpd = 5;
4042                 adev->mode_info.num_dig = 5;
4043                 break;
4044         case CHIP_BEIGE_GOBY:
4045                 adev->mode_info.num_crtc = 2;
4046                 adev->mode_info.num_hpd = 2;
4047                 adev->mode_info.num_dig = 2;
4048                 break;
4049 #endif
4050         default:
4051                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4052                 return -EINVAL;
4053         }
4054
4055         amdgpu_dm_set_irq_funcs(adev);
4056
4057         if (!adev->mode_info.funcs)
4058                 adev->mode_info.funcs = &dm_display_funcs;
4059
4060         /*
4061          * Note: Do NOT change adev->audio_endpt_rreg and
4062          * adev->audio_endpt_wreg because they are initialised in
4063          * amdgpu_device_init()
4064          */
4065 #if defined(CONFIG_DEBUG_KERNEL_DC)
4066         device_create_file(
4067                 adev_to_drm(adev)->dev,
4068                 &dev_attr_s3_debug);
4069 #endif
4070
4071         return 0;
4072 }
4073
4074 static bool modeset_required(struct drm_crtc_state *crtc_state,
4075                              struct dc_stream_state *new_stream,
4076                              struct dc_stream_state *old_stream)
4077 {
4078         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4079 }
4080
4081 static bool modereset_required(struct drm_crtc_state *crtc_state)
4082 {
4083         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4084 }
4085
4086 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4087 {
4088         drm_encoder_cleanup(encoder);
4089         kfree(encoder);
4090 }
4091
4092 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4093         .destroy = amdgpu_dm_encoder_destroy,
4094 };
4095
4097 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4098                                          struct drm_framebuffer *fb,
4099                                          int *min_downscale, int *max_upscale)
4100 {
4101         struct amdgpu_device *adev = drm_to_adev(dev);
4102         struct dc *dc = adev->dm.dc;
4103         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4104         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4105
4106         switch (fb->format->format) {
4107         case DRM_FORMAT_P010:
4108         case DRM_FORMAT_NV12:
4109         case DRM_FORMAT_NV21:
4110                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4111                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4112                 break;
4113
4114         case DRM_FORMAT_XRGB16161616F:
4115         case DRM_FORMAT_ARGB16161616F:
4116         case DRM_FORMAT_XBGR16161616F:
4117         case DRM_FORMAT_ABGR16161616F:
4118                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4119                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4120                 break;
4121
4122         default:
4123                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4124                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4125                 break;
4126         }
4127
4128         /*
4129          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4130          * scaling factor of 1.0 == 1000 units.
4131          */
4132         if (*max_upscale == 1)
4133                 *max_upscale = 1000;
4134
4135         if (*min_downscale == 1)
4136                 *min_downscale = 1000;
4137 }
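
/*
 * Scaling limits are expressed in thousandths of the 1:1 ratio, so
 * 1000 == 1.0x. A sketch of consuming the caps above (compiled out;
 * the example values match the fallback defaults used by
 * fill_dc_scaling_info() below):
 */
#if 0
static void scaling_caps_example(struct drm_device *dev,
                                 struct drm_framebuffer *fb)
{
        int min_downscale, max_upscale;

        get_min_max_dc_plane_scaling(dev, fb, &min_downscale, &max_upscale);
        /* e.g. min_downscale = 250 (0.25x), max_upscale = 16000 (16x);
         * a computed ratio of exactly 1000 (1.0x) always passes. */
}
#endif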
4138
4140 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4141                                 struct dc_scaling_info *scaling_info)
4142 {
4143         int scale_w, scale_h, min_downscale, max_upscale;
4144
4145         memset(scaling_info, 0, sizeof(*scaling_info));
4146
4147         /* Source is in 16.16 fixed point; ignore the fractional part for now. */
4148         scaling_info->src_rect.x = state->src_x >> 16;
4149         scaling_info->src_rect.y = state->src_y >> 16;
4150
4151         /*
4152          * For reasons we don't (yet) fully understand a non-zero
4153          * src_y coordinate into an NV12 buffer can cause a
4154          * system hang. To avoid hangs (and maybe be overly cautious)
4155          * let's reject both non-zero src_x and src_y.
4156          *
4157          * We currently know of only one use-case to reproduce a
4158          * scenario with non-zero src_x and src_y for NV12, which
4159          * is to gesture the YouTube Android app into full screen
4160          * on ChromeOS.
4161          */
4162         if (state->fb &&
4163             state->fb->format->format == DRM_FORMAT_NV12 &&
4164             (scaling_info->src_rect.x != 0 ||
4165              scaling_info->src_rect.y != 0))
4166                 return -EINVAL;
4167
4168         scaling_info->src_rect.width = state->src_w >> 16;
4169         if (scaling_info->src_rect.width == 0)
4170                 return -EINVAL;
4171
4172         scaling_info->src_rect.height = state->src_h >> 16;
4173         if (scaling_info->src_rect.height == 0)
4174                 return -EINVAL;
4175
4176         scaling_info->dst_rect.x = state->crtc_x;
4177         scaling_info->dst_rect.y = state->crtc_y;
4178
4179         if (state->crtc_w == 0)
4180                 return -EINVAL;
4181
4182         scaling_info->dst_rect.width = state->crtc_w;
4183
4184         if (state->crtc_h == 0)
4185                 return -EINVAL;
4186
4187         scaling_info->dst_rect.height = state->crtc_h;
4188
4189         /* DRM doesn't specify clipping on destination output. */
4190         scaling_info->clip_rect = scaling_info->dst_rect;
4191
4192         /* Validate scaling per-format with DC plane caps */
4193         if (state->plane && state->plane->dev && state->fb) {
4194                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4195                                              &min_downscale, &max_upscale);
4196         } else {
4197                 min_downscale = 250;
4198                 max_upscale = 16000;
4199         }
4200
4201         scale_w = scaling_info->dst_rect.width * 1000 /
4202                   scaling_info->src_rect.width;
4203
4204         if (scale_w < min_downscale || scale_w > max_upscale)
4205                 return -EINVAL;
4206
4207         scale_h = scaling_info->dst_rect.height * 1000 /
4208                   scaling_info->src_rect.height;
4209
4210         if (scale_h < min_downscale || scale_h > max_upscale)
4211                 return -EINVAL;
4212
4213         /*
4214          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4215          * assume reasonable defaults based on the format.
4216          */
4217
4218         return 0;
4219 }
4220
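/*
 * Pre-GFX9 ASICs don't use format modifiers; the tiling layout is
 * carried in the legacy AMDGPU_TILING flags instead.  The helper below
 * unpacks those flags (bank width/height, macro-tile aspect, tile
 * split, bank count, pipe config) into DC's gfx8 tiling description.
 */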
4221 static void
4222 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4223                                  uint64_t tiling_flags)
4224 {
4225         /* Fill GFX8 params */
4226         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4227                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4228
4229                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4230                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4231                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4232                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4233                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4234
4235                 /* XXX fix me for VI */
4236                 tiling_info->gfx8.num_banks = num_banks;
4237                 tiling_info->gfx8.array_mode =
4238                                 DC_ARRAY_2D_TILED_THIN1;
4239                 tiling_info->gfx8.tile_split = tile_split;
4240                 tiling_info->gfx8.bank_width = bankw;
4241                 tiling_info->gfx8.bank_height = bankh;
4242                 tiling_info->gfx8.tile_aspect = mtaspect;
4243                 tiling_info->gfx8.tile_mode =
4244                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4245         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4246                         == DC_ARRAY_1D_TILED_THIN1) {
4247                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4248         }
4249
4250         tiling_info->gfx8.pipe_config =
4251                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4252 }
4253
4254 static void
4255 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4256                                   union dc_tiling_info *tiling_info)
4257 {
4258         tiling_info->gfx9.num_pipes =
4259                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4260         tiling_info->gfx9.num_banks =
4261                 adev->gfx.config.gb_addr_config_fields.num_banks;
4262         tiling_info->gfx9.pipe_interleave =
4263                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4264         tiling_info->gfx9.num_shader_engines =
4265                 adev->gfx.config.gb_addr_config_fields.num_se;
4266         tiling_info->gfx9.max_compressed_frags =
4267                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4268         tiling_info->gfx9.num_rb_per_se =
4269                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4270         tiling_info->gfx9.shaderEnable = 1;
4271         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4272             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4273             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4274             adev->asic_type == CHIP_BEIGE_GOBY ||
4275             adev->asic_type == CHIP_VANGOGH)
4276                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4277 }
4278
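/*
 * DCC validation, roughly: describe the surface (format, size, swizzle,
 * scan direction) to DC and ask the ASIC-specific cap function whether
 * display can decompress that combination.  A plane that claims DCC but
 * fails this check is rejected outright.
 */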
4279 static int
4280 validate_dcc(struct amdgpu_device *adev,
4281              const enum surface_pixel_format format,
4282              const enum dc_rotation_angle rotation,
4283              const union dc_tiling_info *tiling_info,
4284              const struct dc_plane_dcc_param *dcc,
4285              const struct dc_plane_address *address,
4286              const struct plane_size *plane_size)
4287 {
4288         struct dc *dc = adev->dm.dc;
4289         struct dc_dcc_surface_param input;
4290         struct dc_surface_dcc_cap output;
4291
4292         memset(&input, 0, sizeof(input));
4293         memset(&output, 0, sizeof(output));
4294
4295         if (!dcc->enable)
4296                 return 0;
4297
4298         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4299             !dc->cap_funcs.get_dcc_compression_cap)
4300                 return -EINVAL;
4301
4302         input.format = format;
4303         input.surface_size.width = plane_size->surface_size.width;
4304         input.surface_size.height = plane_size->surface_size.height;
4305         input.swizzle_mode = tiling_info->gfx9.swizzle;
4306
4307         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4308                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4309         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4310                 input.scan = SCAN_DIRECTION_VERTICAL;
4311
4312         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4313                 return -EINVAL;
4314
4315         if (!output.capable)
4316                 return -EINVAL;
4317
4318         if (dcc->independent_64b_blks == 0 &&
4319             output.grph.rgb.independent_64b_blks != 0)
4320                 return -EINVAL;
4321
4322         return 0;
4323 }
4324
4325 static bool
4326 modifier_has_dcc(uint64_t modifier)
4327 {
4328         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4329 }
4330
4331 static unsigned
4332 modifier_gfx9_swizzle_mode(uint64_t modifier)
4333 {
4334         if (modifier == DRM_FORMAT_MOD_LINEAR)
4335                 return 0;
4336
4337         return AMD_FMT_MOD_GET(TILE, modifier);
4338 }
4339
4340 static const struct drm_format_info *
4341 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4342 {
4343         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4344 }
4345
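/*
 * On GFX9+ the format modifier encodes the tiling layout directly.
 * Below, the device defaults are filled in first and then overridden
 * from the modifier: PIPE_XOR_BITS splits into at most 2^4 pipes with
 * the remainder attributed to shader engines, while GFX10+ (FAMILY_NV
 * and newer) uses packers (PACKERS) instead of banks.
 */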
4346 static void
4347 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4348                                     union dc_tiling_info *tiling_info,
4349                                     uint64_t modifier)
4350 {
4351         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4352         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4353         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4354         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4355
4356         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4357
4358         if (!IS_AMD_FMT_MOD(modifier))
4359                 return;
4360
4361         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4362         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4363
4364         if (adev->family >= AMDGPU_FAMILY_NV) {
4365                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4366         } else {
4367                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4368
4369                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4370         }
4371 }
4372
4373 enum dm_micro_swizzle {
4374         MICRO_SWIZZLE_Z = 0,
4375         MICRO_SWIZZLE_S = 1,
4376         MICRO_SWIZZLE_D = 2,
4377         MICRO_SWIZZLE_R = 3
4378 };
4379
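/*
 * The low two bits of a GFX9+ swizzle mode select the micro-tile
 * ordering, which is why dm_plane_format_mod_supported() below can
 * classify a modifier with "swizzle & 3" against the enum above (e.g.
 * AMD_FMT_MOD_TILE_GFX9_64K_D == 10 ends in 0b10 == MICRO_SWIZZLE_D).
 */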
4380 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4381                                           uint32_t format,
4382                                           uint64_t modifier)
4383 {
4384         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4385         const struct drm_format_info *info = drm_format_info(format);
4386         int i;
4387
4388         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4389
4390         if (!info)
4391                 return false;
4392
4393         /*
4394          * We always have to allow these modifiers:
4395          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4396          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4397          */
4398         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4399             modifier == DRM_FORMAT_MOD_INVALID) {
4400                 return true;
4401         }
4402
4403         /* Check that the modifier is on the list of the plane's supported modifiers. */
4404         for (i = 0; i < plane->modifier_count; i++) {
4405                 if (modifier == plane->modifiers[i])
4406                         break;
4407         }
4408         if (i == plane->modifier_count)
4409                 return false;
4410
4411         /*
4412          * For D swizzle the canonical modifier depends on the bpp, so check
4413          * it here.
4414          */
4415         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4416             adev->family >= AMDGPU_FAMILY_NV) {
4417                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4418                         return false;
4419         }
4420
4421         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4422             info->cpp[0] < 8)
4423                 return false;
4424
4425         if (modifier_has_dcc(modifier)) {
4426                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4427                 if (info->cpp[0] != 4)
4428                         return false;
4429                 /* We support multi-planar formats, but not when combined with
4430                  * additional DCC metadata planes. */
4431                 if (info->num_planes > 1)
4432                         return false;
4433         }
4434
4435         return true;
4436 }
4437
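/*
 * add_modifier() below maintains a heap-allocated, doubling-capacity
 * array of modifiers.  On allocation failure it frees the array and
 * leaves *mods == NULL, so subsequent calls become no-ops and the
 * caller only needs to check *mods once at the end.
 */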
4438 static void
4439 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4440 {
4441         if (!*mods)
4442                 return;
4443
4444         if (*cap - *size < 1) {
4445                 uint64_t new_cap = *cap * 2;
4446                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4447
4448                 if (!new_mods) {
4449                         kfree(*mods);
4450                         *mods = NULL;
4451                         return;
4452                 }
4453
4454                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4455                 kfree(*mods);
4456                 *mods = new_mods;
4457                 *cap = new_cap;
4458         }
4459
4460         (*mods)[*size] = mod;
4461         *size += 1;
4462 }
4463
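/*
 * For illustration, each entry built below is a single u64: AMD_FMT_MOD
 * sets the AMD vendor bits, and every AMD_FMT_MOD_SET(FIELD, val) ORs
 * "val" into FIELD's bit range.  E.g. a Raven 64K_S_X modifier with DCC
 * packs the tile mode, tile version, pipe/bank XOR bits and the DCC
 * flags into one value that userspace can compare verbatim.
 */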
4464 static void
4465 add_gfx9_modifiers(const struct amdgpu_device *adev,
4466                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4467 {
4468         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4469         int pipe_xor_bits = min(8, pipes +
4470                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4471         int bank_xor_bits = min(8 - pipe_xor_bits,
4472                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4473         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4474                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4475
4477         if (adev->family == AMDGPU_FAMILY_RV) {
4478                 /* Raven2 and later */
4479                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4480
4481                 /*
4482                  * No _D DCC swizzles yet because we only allow 32bpp, which
4483                  * doesn't support _D on DCN
4484                  */
4485
4486                 if (has_constant_encode) {
4487                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4488                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4489                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4490                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4491                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4492                                     AMD_FMT_MOD_SET(DCC, 1) |
4493                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4494                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4495                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4496                 }
4497
4498                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4499                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4500                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4501                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4502                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4503                             AMD_FMT_MOD_SET(DCC, 1) |
4504                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4505                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4506                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4507
4508                 if (has_constant_encode) {
4509                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4511                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4512                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4513                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4514                                     AMD_FMT_MOD_SET(DCC, 1) |
4515                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4516                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4517                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4519                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4520                                     AMD_FMT_MOD_SET(RB, rb) |
4521                                     AMD_FMT_MOD_SET(PIPE, pipes));
4522                 }
4523
4524                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4525                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4526                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4527                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4528                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4529                             AMD_FMT_MOD_SET(DCC, 1) |
4530                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4531                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4532                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4533                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4534                             AMD_FMT_MOD_SET(RB, rb) |
4535                             AMD_FMT_MOD_SET(PIPE, pipes));
4536         }
4537
4538         /*
4539          * Only supported for 64bpp on Raven, will be filtered on format in
4540          * dm_plane_format_mod_supported.
4541          */
4542         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4543                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4544                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4545                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4546                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4547
4548         if (adev->family == AMDGPU_FAMILY_RV) {
4549                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4554         }
4555
4556         /*
4557          * Only supported for 64bpp on Raven, will be filtered on format in
4558          * dm_plane_format_mod_supported.
4559          */
4560         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4561                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4562                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4563
4564         if (adev->family == AMDGPU_FAMILY_RV) {
4565                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4567                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4568         }
4569 }
4570
4571 static void
4572 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4573                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4574 {
4575         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4576
4577         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4578                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4579                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4580                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4581                     AMD_FMT_MOD_SET(DCC, 1) |
4582                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4583                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4584                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4585
4586         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4587                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4588                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4589                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4590                     AMD_FMT_MOD_SET(DCC, 1) |
4591                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4592                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4593                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4594                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4595
4596         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4598                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4599                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4600
4601         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4602                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4603                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4604                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4605
4607         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4608         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4610                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4611
4612         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4614                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4615 }
4616
4617 static void
4618 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4619                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4620 {
4621         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4622         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4623
4624         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4626                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4627                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4628                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4629                     AMD_FMT_MOD_SET(DCC, 1) |
4630                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4631                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4632                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4633                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4634
4635         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4636                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4637                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4638                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4639                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4640                     AMD_FMT_MOD_SET(DCC, 1) |
4641                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4642                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4643                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4644                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4645                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4646
4647         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4648                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4649                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4650                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4651                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4652
4653         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4654                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4655                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4656                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4657                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4658
4659         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4660         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4662                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4663
4664         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4666                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4667 }
4668
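/*
 * get_plane_modifiers() below assembles the per-plane modifier list in
 * rough order of preference, appends LINEAR as the universal fallback
 * and terminates the array with DRM_FORMAT_MOD_INVALID.  Cursor planes
 * are special-cased to LINEAR only (cursors are expected to be linear).
 */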
4669 static int
4670 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4671 {
4672         uint64_t size = 0, capacity = 128;
4673         *mods = NULL;
4674
4675         /* We have not hooked up any pre-GFX9 modifiers. */
4676         if (adev->family < AMDGPU_FAMILY_AI)
4677                 return 0;
4678
4679         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4680
4681         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4682                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4683                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4684                 return *mods ? 0 : -ENOMEM;
4685         }
4686
4687         switch (adev->family) {
4688         case AMDGPU_FAMILY_AI:
4689         case AMDGPU_FAMILY_RV:
4690                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4691                 break;
4692         case AMDGPU_FAMILY_NV:
4693         case AMDGPU_FAMILY_VGH:
4694                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4695                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4696                 else
4697                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4698                 break;
4699         }
4700
4701         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4702
4703         /* INVALID marks the end of the list. */
4704         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4705
4706         if (!*mods)
4707                 return -ENOMEM;
4708
4709         return 0;
4710 }
4711
4712 static int
4713 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4714                                           const struct amdgpu_framebuffer *afb,
4715                                           const enum surface_pixel_format format,
4716                                           const enum dc_rotation_angle rotation,
4717                                           const struct plane_size *plane_size,
4718                                           union dc_tiling_info *tiling_info,
4719                                           struct dc_plane_dcc_param *dcc,
4720                                           struct dc_plane_address *address,
4721                                           const bool force_disable_dcc)
4722 {
4723         const uint64_t modifier = afb->base.modifier;
4724         int ret;
4725
4726         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4727         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4728
4729         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4730                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4731
4732                 dcc->enable = 1;
4733                 dcc->meta_pitch = afb->base.pitches[1];
4734                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4735
4736                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4737                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4738         }
4739
4740         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4741         if (ret)
4742                 return ret;
4743
4744         return 0;
4745 }
4746
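/*
 * For illustration of the video path below: a 1920x1080 NV12 buffer has
 * a 1-byte-per-pixel luma plane (surface_pitch = pitches[0] / 1) and an
 * interleaved 2-bytes-per-sample CbCr plane at half resolution, so the
 * chroma size comes out as 960x540 with chroma_pitch = pitches[1] / 2.
 */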
4747 static int
4748 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4749                              const struct amdgpu_framebuffer *afb,
4750                              const enum surface_pixel_format format,
4751                              const enum dc_rotation_angle rotation,
4752                              const uint64_t tiling_flags,
4753                              union dc_tiling_info *tiling_info,
4754                              struct plane_size *plane_size,
4755                              struct dc_plane_dcc_param *dcc,
4756                              struct dc_plane_address *address,
4757                              bool tmz_surface,
4758                              bool force_disable_dcc)
4759 {
4760         const struct drm_framebuffer *fb = &afb->base;
4761         int ret;
4762
4763         memset(tiling_info, 0, sizeof(*tiling_info));
4764         memset(plane_size, 0, sizeof(*plane_size));
4765         memset(dcc, 0, sizeof(*dcc));
4766         memset(address, 0, sizeof(*address));
4767
4768         address->tmz_surface = tmz_surface;
4769
4770         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4771                 uint64_t addr = afb->address + fb->offsets[0];
4772
4773                 plane_size->surface_size.x = 0;
4774                 plane_size->surface_size.y = 0;
4775                 plane_size->surface_size.width = fb->width;
4776                 plane_size->surface_size.height = fb->height;
4777                 plane_size->surface_pitch =
4778                         fb->pitches[0] / fb->format->cpp[0];
4779
4780                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4781                 address->grph.addr.low_part = lower_32_bits(addr);
4782                 address->grph.addr.high_part = upper_32_bits(addr);
4783         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4784                 uint64_t luma_addr = afb->address + fb->offsets[0];
4785                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4786
4787                 plane_size->surface_size.x = 0;
4788                 plane_size->surface_size.y = 0;
4789                 plane_size->surface_size.width = fb->width;
4790                 plane_size->surface_size.height = fb->height;
4791                 plane_size->surface_pitch =
4792                         fb->pitches[0] / fb->format->cpp[0];
4793
4794                 plane_size->chroma_size.x = 0;
4795                 plane_size->chroma_size.y = 0;
4796                 /* TODO: set these based on surface format */
4797                 plane_size->chroma_size.width = fb->width / 2;
4798                 plane_size->chroma_size.height = fb->height / 2;
4799
4800                 plane_size->chroma_pitch =
4801                         fb->pitches[1] / fb->format->cpp[1];
4802
4803                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4804                 address->video_progressive.luma_addr.low_part =
4805                         lower_32_bits(luma_addr);
4806                 address->video_progressive.luma_addr.high_part =
4807                         upper_32_bits(luma_addr);
4808                 address->video_progressive.chroma_addr.low_part =
4809                         lower_32_bits(chroma_addr);
4810                 address->video_progressive.chroma_addr.high_part =
4811                         upper_32_bits(chroma_addr);
4812         }
4813
4814         if (adev->family >= AMDGPU_FAMILY_AI) {
4815                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4816                                                                 rotation, plane_size,
4817                                                                 tiling_info, dcc,
4818                                                                 address,
4819                                                                 force_disable_dcc);
4820                 if (ret)
4821                         return ret;
4822         } else {
4823                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4824         }
4825
4826         return 0;
4827 }
4828
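/*
 * DRM's per-plane "alpha" property is 16 bits (0x0000-0xffff) while DC
 * takes an 8-bit global alpha, hence the ">> 8" below; 0xffff (fully
 * opaque, the default) is also the only value that leaves global alpha
 * disabled.
 */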
4829 static void
4830 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4831                                bool *per_pixel_alpha, bool *global_alpha,
4832                                int *global_alpha_value)
4833 {
4834         *per_pixel_alpha = false;
4835         *global_alpha = false;
4836         *global_alpha_value = 0xff;
4837
4838         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4839                 return;
4840
4841         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4842                 static const uint32_t alpha_formats[] = {
4843                         DRM_FORMAT_ARGB8888,
4844                         DRM_FORMAT_RGBA8888,
4845                         DRM_FORMAT_ABGR8888,
4846                 };
4847                 uint32_t format = plane_state->fb->format->format;
4848                 unsigned int i;
4849
4850                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4851                         if (format == alpha_formats[i]) {
4852                                 *per_pixel_alpha = true;
4853                                 break;
4854                         }
4855                 }
4856         }
4857
4858         if (plane_state->alpha < 0xffff) {
4859                 *global_alpha = true;
4860                 *global_alpha_value = plane_state->alpha >> 8;
4861         }
4862 }
4863
4864 static int
4865 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4866                             const enum surface_pixel_format format,
4867                             enum dc_color_space *color_space)
4868 {
4869         bool full_range;
4870
4871         *color_space = COLOR_SPACE_SRGB;
4872
4873         /* DRM color properties only affect non-RGB formats. */
4874         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4875                 return 0;
4876
4877         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4878
4879         switch (plane_state->color_encoding) {
4880         case DRM_COLOR_YCBCR_BT601:
4881                 if (full_range)
4882                         *color_space = COLOR_SPACE_YCBCR601;
4883                 else
4884                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4885                 break;
4886
4887         case DRM_COLOR_YCBCR_BT709:
4888                 if (full_range)
4889                         *color_space = COLOR_SPACE_YCBCR709;
4890                 else
4891                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4892                 break;
4893
4894         case DRM_COLOR_YCBCR_BT2020:
4895                 if (full_range)
4896                         *color_space = COLOR_SPACE_2020_YCBCR;
4897                 else
4898                         return -EINVAL;
4899                 break;
4900
4901         default:
4902                 return -EINVAL;
4903         }
4904
4905         return 0;
4906 }
4907
4908 static int
4909 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4910                             const struct drm_plane_state *plane_state,
4911                             const uint64_t tiling_flags,
4912                             struct dc_plane_info *plane_info,
4913                             struct dc_plane_address *address,
4914                             bool tmz_surface,
4915                             bool force_disable_dcc)
4916 {
4917         const struct drm_framebuffer *fb = plane_state->fb;
4918         const struct amdgpu_framebuffer *afb =
4919                 to_amdgpu_framebuffer(plane_state->fb);
4920         int ret;
4921
4922         memset(plane_info, 0, sizeof(*plane_info));
4923
4924         switch (fb->format->format) {
4925         case DRM_FORMAT_C8:
4926                 plane_info->format =
4927                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4928                 break;
4929         case DRM_FORMAT_RGB565:
4930                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4931                 break;
4932         case DRM_FORMAT_XRGB8888:
4933         case DRM_FORMAT_ARGB8888:
4934                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4935                 break;
4936         case DRM_FORMAT_XRGB2101010:
4937         case DRM_FORMAT_ARGB2101010:
4938                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4939                 break;
4940         case DRM_FORMAT_XBGR2101010:
4941         case DRM_FORMAT_ABGR2101010:
4942                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4943                 break;
4944         case DRM_FORMAT_XBGR8888:
4945         case DRM_FORMAT_ABGR8888:
4946                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4947                 break;
4948         case DRM_FORMAT_NV21:
4949                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4950                 break;
4951         case DRM_FORMAT_NV12:
4952                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4953                 break;
4954         case DRM_FORMAT_P010:
4955                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4956                 break;
4957         case DRM_FORMAT_XRGB16161616F:
4958         case DRM_FORMAT_ARGB16161616F:
4959                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4960                 break;
4961         case DRM_FORMAT_XBGR16161616F:
4962         case DRM_FORMAT_ABGR16161616F:
4963                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4964                 break;
4965         default:
4966                 DRM_ERROR(
4967                         "Unsupported screen format %p4cc\n",
4968                         &fb->format->format);
4969                 return -EINVAL;
4970         }
4971
4972         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4973         case DRM_MODE_ROTATE_0:
4974                 plane_info->rotation = ROTATION_ANGLE_0;
4975                 break;
4976         case DRM_MODE_ROTATE_90:
4977                 plane_info->rotation = ROTATION_ANGLE_90;
4978                 break;
4979         case DRM_MODE_ROTATE_180:
4980                 plane_info->rotation = ROTATION_ANGLE_180;
4981                 break;
4982         case DRM_MODE_ROTATE_270:
4983                 plane_info->rotation = ROTATION_ANGLE_270;
4984                 break;
4985         default:
4986                 plane_info->rotation = ROTATION_ANGLE_0;
4987                 break;
4988         }
4989
4990         plane_info->visible = true;
4991         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4992
4993         plane_info->layer_index = 0;
4994
4995         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4996                                           &plane_info->color_space);
4997         if (ret)
4998                 return ret;
4999
5000         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5001                                            plane_info->rotation, tiling_flags,
5002                                            &plane_info->tiling_info,
5003                                            &plane_info->plane_size,
5004                                            &plane_info->dcc, address, tmz_surface,
5005                                            force_disable_dcc);
5006         if (ret)
5007                 return ret;
5008
5009         fill_blending_from_plane_state(
5010                 plane_state, &plane_info->per_pixel_alpha,
5011                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5012
5013         return 0;
5014 }
5015
5016 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5017                                     struct dc_plane_state *dc_plane_state,
5018                                     struct drm_plane_state *plane_state,
5019                                     struct drm_crtc_state *crtc_state)
5020 {
5021         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5022         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5023         struct dc_scaling_info scaling_info;
5024         struct dc_plane_info plane_info;
5025         int ret;
5026         bool force_disable_dcc = false;
5027
5028         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5029         if (ret)
5030                 return ret;
5031
5032         dc_plane_state->src_rect = scaling_info.src_rect;
5033         dc_plane_state->dst_rect = scaling_info.dst_rect;
5034         dc_plane_state->clip_rect = scaling_info.clip_rect;
5035         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5036
5037         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5038         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5039                                           afb->tiling_flags,
5040                                           &plane_info,
5041                                           &dc_plane_state->address,
5042                                           afb->tmz_surface,
5043                                           force_disable_dcc);
5044         if (ret)
5045                 return ret;
5046
5047         dc_plane_state->format = plane_info.format;
5048         dc_plane_state->color_space = plane_info.color_space;
5050         dc_plane_state->plane_size = plane_info.plane_size;
5051         dc_plane_state->rotation = plane_info.rotation;
5052         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5053         dc_plane_state->stereo_format = plane_info.stereo_format;
5054         dc_plane_state->tiling_info = plane_info.tiling_info;
5055         dc_plane_state->visible = plane_info.visible;
5056         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5057         dc_plane_state->global_alpha = plane_info.global_alpha;
5058         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5059         dc_plane_state->dcc = plane_info.dcc;
5060         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5061         dc_plane_state->flip_int_enabled = true;
5062
5063         /*
5064          * Always set input transfer function, since plane state is refreshed
5065          * every time.
5066          */
5067         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5068         if (ret)
5069                 return ret;
5070
5071         return 0;
5072 }
5073
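/*
 * RMX scaling, with a worked example: fitting a 1920x1080 source onto a
 * 1600x1200 native mode with RMX_ASPECT keeps the aspect ratio, so
 * dst.height = 1080 * 1600 / 1920 = 900 and the 1600x900 rectangle is
 * centered with dst.y = (1200 - 900) / 2 = 150 (letterboxing).
 */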
5074 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5075                                            const struct dm_connector_state *dm_state,
5076                                            struct dc_stream_state *stream)
5077 {
5078         enum amdgpu_rmx_type rmx_type;
5079
5080         struct rect src = { 0 }; /* viewport in composition space */
5081         struct rect dst = { 0 }; /* stream addressable area */
5082
5083         /* no mode: nothing to be done */
5084         if (!mode)
5085                 return;
5086
5087         /* Full screen scaling by default */
5088         src.width = mode->hdisplay;
5089         src.height = mode->vdisplay;
5090         dst.width = stream->timing.h_addressable;
5091         dst.height = stream->timing.v_addressable;
5092
5093         if (dm_state) {
5094                 rmx_type = dm_state->scaling;
5095                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5096                         if (src.width * dst.height <
5097                                         src.height * dst.width) {
5098                                 /* height needs less upscaling/more downscaling */
5099                                 dst.width = src.width *
5100                                                 dst.height / src.height;
5101                         } else {
5102                                 /* width needs less upscaling/more downscaling */
5103                                 dst.height = src.height *
5104                                                 dst.width / src.width;
5105                         }
5106                 } else if (rmx_type == RMX_CENTER) {
5107                         dst = src;
5108                 }
5109
5110                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5111                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5112
5113                 if (dm_state->underscan_enable) {
5114                         dst.x += dm_state->underscan_hborder / 2;
5115                         dst.y += dm_state->underscan_vborder / 2;
5116                         dst.width -= dm_state->underscan_hborder;
5117                         dst.height -= dm_state->underscan_vborder;
5118                 }
5119         }
5120
5121         stream->src = src;
5122         stream->dst = dst;
5123
5124         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5125                       dst.x, dst.y, dst.width, dst.height);
5126
5127 }
5128
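/*
 * Display bpc selection, roughly: for YCbCr 4:2:0 start at 8 bpc and
 * raise it only if the sink's HDMI HF-VSDB deep-color flags allow it;
 * otherwise trust the EDID bpc (defaulting to 8).  Any user-requested
 * cap is applied afterwards and rounded down to an even value, since
 * only even component depths are defined for the wire formats.
 */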
5129 static enum dc_color_depth
5130 convert_color_depth_from_display_info(const struct drm_connector *connector,
5131                                       bool is_y420, int requested_bpc)
5132 {
5133         uint8_t bpc;
5134
5135         if (is_y420) {
5136                 bpc = 8;
5137
5138                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5139                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5140                         bpc = 16;
5141                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5142                         bpc = 12;
5143                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5144                         bpc = 10;
5145         } else {
5146                 bpc = (uint8_t)connector->display_info.bpc;
5147                 /* Assume 8 bpc by default if no bpc is specified. */
5148                 bpc = bpc ? bpc : 8;
5149         }
5150
5151         if (requested_bpc > 0) {
5152                 /*
5153                  * Cap display bpc based on the user requested value.
5154                  *
5155                  * The value for state->max_bpc may not be correctly updated
5156                  * depending on when the connector gets added to the state
5157                  * or if this was called outside of atomic check, so it
5158                  * can't be used directly.
5159                  */
5160                 bpc = min_t(u8, bpc, requested_bpc);
5161
5162                 /* Round down to the nearest even number. */
5163                 bpc = bpc - (bpc & 1);
5164         }
5165
5166         switch (bpc) {
5167         case 0:
5168                 /*
5169                  * Temporary workaround: DRM doesn't parse color depth for
5170                  * EDID revisions before 1.4.
5171                  * TODO: Fix EDID parsing.
5172                  */
5173                 return COLOR_DEPTH_888;
5174         case 6:
5175                 return COLOR_DEPTH_666;
5176         case 8:
5177                 return COLOR_DEPTH_888;
5178         case 10:
5179                 return COLOR_DEPTH_101010;
5180         case 12:
5181                 return COLOR_DEPTH_121212;
5182         case 14:
5183                 return COLOR_DEPTH_141414;
5184         case 16:
5185                 return COLOR_DEPTH_161616;
5186         default:
5187                 return COLOR_DEPTH_UNDEFINED;
5188         }
5189 }
5190
5191 static enum dc_aspect_ratio
5192 get_aspect_ratio(const struct drm_display_mode *mode_in)
5193 {
5194         /* 1-1 mapping, since both enums follow the HDMI spec. */
5195         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5196 }
5197
5198 static enum dc_color_space
5199 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5200 {
5201         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5202
5203         switch (dc_crtc_timing->pixel_encoding) {
5204         case PIXEL_ENCODING_YCBCR422:
5205         case PIXEL_ENCODING_YCBCR444:
5206         case PIXEL_ENCODING_YCBCR420:
5207         {
5208                 /*
5209                  * 27030 kHz is the separation point between HDTV and SDTV
5210                  * according to the HDMI spec; we use YCbCr709 and YCbCr601
5211                  * respectively.
5212                  */
5213                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5214                         if (dc_crtc_timing->flags.Y_ONLY)
5215                                 color_space =
5216                                         COLOR_SPACE_YCBCR709_LIMITED;
5217                         else
5218                                 color_space = COLOR_SPACE_YCBCR709;
5219                 } else {
5220                         if (dc_crtc_timing->flags.Y_ONLY)
5221                                 color_space =
5222                                         COLOR_SPACE_YCBCR601_LIMITED;
5223                         else
5224                                 color_space = COLOR_SPACE_YCBCR601;
5225                 }
5226
5227         }
5228         break;
5229         case PIXEL_ENCODING_RGB:
5230                 color_space = COLOR_SPACE_SRGB;
5231                 break;
5232
5233         default:
5234                 WARN_ON(1);
5235                 break;
5236         }
5237
5238         return color_space;
5239 }
5240
5241 static bool adjust_colour_depth_from_display_info(
5242         struct dc_crtc_timing *timing_out,
5243         const struct drm_display_info *info)
5244 {
5245         enum dc_color_depth depth = timing_out->display_color_depth;
5246         int normalized_clk;
5247         do {
5248                 normalized_clk = timing_out->pix_clk_100hz / 10;
5249                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5250                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5251                         normalized_clk /= 2;
5252                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5253                 switch (depth) {
5254                 case COLOR_DEPTH_888:
5255                         break;
5256                 case COLOR_DEPTH_101010:
5257                         normalized_clk = (normalized_clk * 30) / 24;
5258                         break;
5259                 case COLOR_DEPTH_121212:
5260                         normalized_clk = (normalized_clk * 36) / 24;
5261                         break;
5262                 case COLOR_DEPTH_161616:
5263                         normalized_clk = (normalized_clk * 48) / 24;
5264                         break;
5265                 default:
5266                         /* The above depths are the only ones valid for HDMI. */
5267                         return false;
5268                 }
5269                 if (normalized_clk <= info->max_tmds_clock) {
5270                         timing_out->display_color_depth = depth;
5271                         return true;
5272                 }
5273         } while (--depth > COLOR_DEPTH_666);
5274         return false;
5275 }
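
/*
 * Worked example for the loop above (for illustration): a 594 MHz 4K60
 * mode carried as YCbCr 4:2:0 normalizes to 594000 / 2 = 297000 kHz.
 * At 12 bpc the effective TMDS clock is 297000 * 36 / 24 = 445500 kHz;
 * against a 340 MHz HDMI 1.4 sink that fails, 10 bpc (371250 kHz) still
 * fails, and the loop settles on 8 bpc (297000 kHz), which fits.
 */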
5276
5277 static void fill_stream_properties_from_drm_display_mode(
5278         struct dc_stream_state *stream,
5279         const struct drm_display_mode *mode_in,
5280         const struct drm_connector *connector,
5281         const struct drm_connector_state *connector_state,
5282         const struct dc_stream_state *old_stream,
5283         int requested_bpc)
5284 {
5285         struct dc_crtc_timing *timing_out = &stream->timing;
5286         const struct drm_display_info *info = &connector->display_info;
5287         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5288         struct hdmi_vendor_infoframe hv_frame;
5289         struct hdmi_avi_infoframe avi_frame;
5290
5291         memset(&hv_frame, 0, sizeof(hv_frame));
5292         memset(&avi_frame, 0, sizeof(avi_frame));
5293
5294         timing_out->h_border_left = 0;
5295         timing_out->h_border_right = 0;
5296         timing_out->v_border_top = 0;
5297         timing_out->v_border_bottom = 0;
5298         /* TODO: un-hardcode */
5299         if (drm_mode_is_420_only(info, mode_in)
5300                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5301                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5302         else if (drm_mode_is_420_also(info, mode_in)
5303                         && aconnector->force_yuv420_output)
5304                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5305         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5306                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5307                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5308         else
5309                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5310
5311         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5312         timing_out->display_color_depth = convert_color_depth_from_display_info(
5313                 connector,
5314                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5315                 requested_bpc);
5316         timing_out->scan_type = SCANNING_TYPE_NODATA;
5317         timing_out->hdmi_vic = 0;
5318
5319         if (old_stream) {
5320                 timing_out->vic = old_stream->timing.vic;
5321                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5322                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5323         } else {
5324                 timing_out->vic = drm_match_cea_mode(mode_in);
5325                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5326                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5327                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5328                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5329         }
5330
5331         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5332                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5333                 timing_out->vic = avi_frame.video_code;
5334                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5335                 timing_out->hdmi_vic = hv_frame.vic;
5336         }
5337
5338         if (is_freesync_video_mode(mode_in, aconnector)) {
5339                 timing_out->h_addressable = mode_in->hdisplay;
5340                 timing_out->h_total = mode_in->htotal;
5341                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5342                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5343                 timing_out->v_total = mode_in->vtotal;
5344                 timing_out->v_addressable = mode_in->vdisplay;
5345                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5346                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5347                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5348         } else {
5349                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5350                 timing_out->h_total = mode_in->crtc_htotal;
5351                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5352                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5353                 timing_out->v_total = mode_in->crtc_vtotal;
5354                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5355                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5356                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5357                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5358         }
5359
5360         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5361
5362         stream->output_color_space = get_output_color_space(timing_out);
5363
5364         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5365         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5366         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5367                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5368                     drm_mode_is_420_also(info, mode_in) &&
5369                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5370                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5371                         adjust_colour_depth_from_display_info(timing_out, info);
5372                 }
5373         }
5374 }
5375
5376 static void fill_audio_info(struct audio_info *audio_info,
5377                             const struct drm_connector *drm_connector,
5378                             const struct dc_sink *dc_sink)
5379 {
5380         int i = 0;
5381         int cea_revision = 0;
5382         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5383
5384         audio_info->manufacture_id = edid_caps->manufacturer_id;
5385         audio_info->product_id = edid_caps->product_id;
5386
5387         cea_revision = drm_connector->display_info.cea_rev;
5388
5389         strscpy(audio_info->display_name,
5390                 edid_caps->display_name,
5391                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5392
5393         if (cea_revision >= 3) {
5394                 audio_info->mode_count = edid_caps->audio_mode_count;
5395
5396                 for (i = 0; i < audio_info->mode_count; ++i) {
5397                         audio_info->modes[i].format_code =
5398                                         (enum audio_format_code)
5399                                         (edid_caps->audio_modes[i].format_code);
5400                         audio_info->modes[i].channel_count =
5401                                         edid_caps->audio_modes[i].channel_count;
5402                         audio_info->modes[i].sample_rates.all =
5403                                         edid_caps->audio_modes[i].sample_rate;
5404                         audio_info->modes[i].sample_size =
5405                                         edid_caps->audio_modes[i].sample_size;
5406                 }
5407         }
5408
5409         audio_info->flags.all = edid_caps->speaker_flags;
5410
5411         /* TODO: We only check the progressive mode; check interlaced mode too */

5412         if (drm_connector->latency_present[0]) {
5413                 audio_info->video_latency = drm_connector->video_latency[0];
5414                 audio_info->audio_latency = drm_connector->audio_latency[0];
5415         }
5416
5417         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5418
5419 }
5420
5421 static void
5422 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5423                                       struct drm_display_mode *dst_mode)
5424 {
5425         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5426         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5427         dst_mode->crtc_clock = src_mode->crtc_clock;
5428         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5429         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5430         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5431         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5432         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5433         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5434         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5435         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5436         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5437         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5438         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5439 }
5440
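/*
 * If scaling is enabled, or if the requested mode shares its clock and
 * h/v totals with the native mode, reuse the native CRTC timing; otherwise
 * leave the requested mode untouched.
 */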
5441 static void
5442 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5443                                         const struct drm_display_mode *native_mode,
5444                                         bool scale_enabled)
5445 {
5446         if (scale_enabled) {
5447                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5448         } else if (native_mode->clock == drm_mode->clock &&
5449                         native_mode->htotal == drm_mode->htotal &&
5450                         native_mode->vtotal == drm_mode->vtotal) {
5451                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5452         } else {
5453                 /* no scaling and no amdgpu-inserted mode, no need to patch */
5454         }
5455 }
5456
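/*
 * Build a virtual sink on the connector's link so that a stream can still
 * be created and validated when no physical sink is attached (e.g. headless
 * or forced-on connectors).
 */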
5457 static struct dc_sink *
5458 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5459 {
5460         struct dc_sink_init_data sink_init_data = { 0 };
5461         struct dc_sink *sink = NULL;
5462         sink_init_data.link = aconnector->dc_link;
5463         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5464
5465         sink = dc_sink_create(&sink_init_data);
5466         if (!sink) {
5467                 DRM_ERROR("Failed to create sink!\n");
5468                 return NULL;
5469         }
5470         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5471
5472         return sink;
5473 }
5474
5475 static void set_multisync_trigger_params(
5476                 struct dc_stream_state *stream)
5477 {
5478         struct dc_stream_state *master = NULL;
5479
5480         if (stream->triggered_crtc_reset.enabled) {
5481                 master = stream->triggered_crtc_reset.event_source;
5482                 stream->triggered_crtc_reset.event =
5483                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5484                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5485                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5486         }
5487 }
5488
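/*
 * Pick the reset-enabled stream with the highest refresh rate as the
 * multisync master. Refresh is derived from the timing as
 * pix_clk_100hz * 100 / (h_total * v_total); e.g. a (hypothetical)
 * 148.5 MHz clock with a 2200x1125 total gives 148500000 / 2475000 = 60 Hz.
 */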
5489 static void set_master_stream(struct dc_stream_state *stream_set[],
5490                               int stream_count)
5491 {
5492         int j, highest_rfr = 0, master_stream = 0;
5493
5494         for (j = 0;  j < stream_count; j++) {
5495                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5496                         int refresh_rate = 0;
5497
5498                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5499                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5500                         if (refresh_rate > highest_rfr) {
5501                                 highest_rfr = refresh_rate;
5502                                 master_stream = j;
5503                         }
5504                 }
5505         }
5506         for (j = 0;  j < stream_count; j++) {
5507                 if (stream_set[j])
5508                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5509         }
5510 }
5511
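/*
 * With two or more streams in the context, elect a master stream and point
 * every stream's CRTC-reset trigger at it. Gating this on the AMD VSDB bits
 * is still a TODO (see below).
 */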
5512 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5513 {
5514         int i = 0;
5515         struct dc_stream_state *stream;
5516
5517         if (context->stream_count < 2)
5518                 return;
5519         for (i = 0; i < context->stream_count ; i++) {
5520                 if (!context->streams[i])
5521                         continue;
5522                 /*
5523                  * TODO: add a function to read AMD VSDB bits and set
5524                  * crtc_sync_master.multi_sync_enabled flag.
5525                  * For now it's set to false.
5526                  */
5527         }
5528
5529         set_master_stream(context->streams, context->stream_count);
5530
5531         for (i = 0; i < context->stream_count ; i++) {
5532                 stream = context->streams[i];
5533
5534                 if (!stream)
5535                         continue;
5536
5537                 set_multisync_trigger_params(stream);
5538         }
5539 }
5540
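/*
 * For DP SST sinks, parse the raw DSC capability blocks cached in the
 * link's DPCD into dsc_caps. The stream's DSC flag is always cleared first
 * and only set later by the DSC policy.
 */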
5541 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5542                                                         struct dc_sink *sink, struct dc_stream_state *stream,
5543                                                         struct dsc_dec_dpcd_caps *dsc_caps)
5544 {
5545         stream->timing.flags.DSC = 0;
5546
5547         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5548 #if defined(CONFIG_DRM_AMD_DC_DCN)
5549                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5550                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5551                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5552                                       dsc_caps);
5553 #endif
5554         }
5555 }
5556
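/*
 * Enable DSC on the stream if dc_dsc_compute_config() can fit the timing
 * into the link bandwidth, then apply any debugfs overrides (forced enable,
 * slice counts, target bits per pixel).
 */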
5557 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5558                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
5559                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
5560 {
5561         struct drm_connector *drm_connector = &aconnector->base;
5562         uint32_t link_bandwidth_kbps;
5563
5564         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5565                                                         dc_link_get_link_cap(aconnector->dc_link));
5566 #if defined(CONFIG_DRM_AMD_DC_DCN)
5567         /* Set DSC policy according to dsc_clock_en */
5568         dc_dsc_policy_set_enable_dsc_when_not_needed(
5569                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5570
5571         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5572
5573                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5574                                                 dsc_caps,
5575                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5576                                                 0,
5577                                                 link_bandwidth_kbps,
5578                                                 &stream->timing,
5579                                                 &stream->timing.dsc_cfg)) {
5580                         stream->timing.flags.DSC = 1;
5581                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5582                 }
5583         }
5584
5585         /* Overwrite the stream flag if DSC is enabled through debugfs */
5586         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5587                 stream->timing.flags.DSC = 1;
5588
5589         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5590                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5591
5592         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5593                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5594
5595         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5596                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5597 #endif
5598 }
5599
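/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, caching the result in freesync_vid_base (a non-zero clock
 * marks the cache valid). Falls back to the first listed mode when the
 * EDID has no preferred mode.
 */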
5600 static struct drm_display_mode *
5601 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5602                           bool use_probed_modes)
5603 {
5604         struct drm_display_mode *m, *m_pref = NULL;
5605         u16 current_refresh, highest_refresh;
5606         struct list_head *list_head = use_probed_modes ?
5607                                                     &aconnector->base.probed_modes :
5608                                                     &aconnector->base.modes;
5609
5610         if (aconnector->freesync_vid_base.clock != 0)
5611                 return &aconnector->freesync_vid_base;
5612
5613         /* Find the preferred mode */
5614         list_for_each_entry(m, list_head, head) {
5615                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5616                         m_pref = m;
5617                         break;
5618                 }
5619         }
5620
5621         if (!m_pref) {
5622                 /* Probably an EDID with no preferred mode. Fallback to first entry */
5623                 m_pref = list_first_entry_or_null(
5624                         &aconnector->base.modes, struct drm_display_mode, head);
5625                 if (!m_pref) {
5626                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5627                         return NULL;
5628                 }
5629         }
5630
5631         highest_refresh = drm_mode_vrefresh(m_pref);
5632
5633         /*
5634          * Find the mode with highest refresh rate with same resolution.
5635          * For some monitors, preferred mode is not the mode with highest
5636          * supported refresh rate.
5637          */
5638         list_for_each_entry(m, list_head, head) {
5639                 current_refresh  = drm_mode_vrefresh(m);
5640
5641                 if (m->hdisplay == m_pref->hdisplay &&
5642                     m->vdisplay == m_pref->vdisplay &&
5643                     highest_refresh < current_refresh) {
5644                         highest_refresh = current_refresh;
5645                         m_pref = m;
5646                 }
5647         }
5648
5649         aconnector->freesync_vid_base = *m_pref;
5650         return m_pref;
5651 }
5652
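/*
 * A mode qualifies as a freesync video mode when it matches the cached
 * highest-refresh base mode in everything but vtotal, with the vsync pulse
 * shifted by exactly the vtotal difference, i.e. only the vertical front
 * porch is stretched.
 */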
5653 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5654                                    struct amdgpu_dm_connector *aconnector)
5655 {
5656         struct drm_display_mode *high_mode;
5657         int timing_diff;
5658
5659         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5660         if (!high_mode || !mode)
5661                 return false;
5662
5663         timing_diff = high_mode->vtotal - mode->vtotal;
5664
5665         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5666             high_mode->hdisplay != mode->hdisplay ||
5667             high_mode->vdisplay != mode->vdisplay ||
5668             high_mode->hsync_start != mode->hsync_start ||
5669             high_mode->hsync_end != mode->hsync_end ||
5670             high_mode->htotal != mode->htotal ||
5671             high_mode->hskew != mode->hskew ||
5672             high_mode->vscan != mode->vscan ||
5673             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5674             high_mode->vsync_end - mode->vsync_end != timing_diff)
5675                 return false;
5676         else
5677                 return true;
5678 }
5679
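/*
 * Central stream construction path: attaches a (possibly fake) sink,
 * optionally swaps in the freesync base timing, applies the SST DSC policy,
 * and fills scaling, audio and, for PSR-capable links, VSC SDP colorimetry
 * info packets.
 */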
5680 static struct dc_stream_state *
5681 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5682                        const struct drm_display_mode *drm_mode,
5683                        const struct dm_connector_state *dm_state,
5684                        const struct dc_stream_state *old_stream,
5685                        int requested_bpc)
5686 {
5687         struct drm_display_mode *preferred_mode = NULL;
5688         struct drm_connector *drm_connector;
5689         const struct drm_connector_state *con_state =
5690                 dm_state ? &dm_state->base : NULL;
5691         struct dc_stream_state *stream = NULL;
5692         struct drm_display_mode mode = *drm_mode;
5693         struct drm_display_mode saved_mode;
5694         struct drm_display_mode *freesync_mode = NULL;
5695         bool native_mode_found = false;
5696         bool recalculate_timing = false;
5697         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5698         int mode_refresh;
5699         int preferred_refresh = 0;
5700 #if defined(CONFIG_DRM_AMD_DC_DCN)
5701         struct dsc_dec_dpcd_caps dsc_caps;
5702 #endif
5703         struct dc_sink *sink = NULL;
5704
5705         memset(&saved_mode, 0, sizeof(saved_mode));
5706
5707         if (aconnector == NULL) {
5708                 DRM_ERROR("aconnector is NULL!\n");
5709                 return stream;
5710         }
5711
5712         drm_connector = &aconnector->base;
5713
5714         if (!aconnector->dc_sink) {
5715                 sink = create_fake_sink(aconnector);
5716                 if (!sink)
5717                         return stream;
5718         } else {
5719                 sink = aconnector->dc_sink;
5720                 dc_sink_retain(sink);
5721         }
5722
5723         stream = dc_create_stream_for_sink(sink);
5724
5725         if (stream == NULL) {
5726                 DRM_ERROR("Failed to create stream for sink!\n");
5727                 goto finish;
5728         }
5729
5730         stream->dm_stream_context = aconnector;
5731
5732         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5733                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5734
5735         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5736                 /* Search for preferred mode */
5737                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5738                         native_mode_found = true;
5739                         break;
5740                 }
5741         }
5742         if (!native_mode_found)
5743                 preferred_mode = list_first_entry_or_null(
5744                                 &aconnector->base.modes,
5745                                 struct drm_display_mode,
5746                                 head);
5747
5748         mode_refresh = drm_mode_vrefresh(&mode);
5749
5750         if (preferred_mode == NULL) {
5751                 /*
5752                  * This may not be an error; the use case is when we have no
5753                  * usermode calls to reset and set mode upon hotplug. In this
5754                  * case, we call set mode ourselves to restore the previous mode,
5755                  * and the modelist may not be filled in yet.
5756                  */
5757                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5758         } else {
5759                 recalculate_timing = amdgpu_freesync_vid_mode &&
5760                                  is_freesync_video_mode(&mode, aconnector);
5761                 if (recalculate_timing) {
5762                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5763                         saved_mode = mode;
5764                         mode = *freesync_mode;
5765                 } else {
5766                         decide_crtc_timing_for_drm_display_mode(
5767                                 &mode, preferred_mode, scale);
5768
5769                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
5770                 }
5771         }
5772
5773         if (recalculate_timing)
5774                 drm_mode_set_crtcinfo(&saved_mode, 0);
5775         else if (!dm_state)
5776                 drm_mode_set_crtcinfo(&mode, 0);
5777
5778         /*
5779          * If scaling is enabled and refresh rate didn't change,
5780          * we copy the vic and polarities of the old timings.
5781          */
5782         if (!scale || mode_refresh != preferred_refresh)
5783                 fill_stream_properties_from_drm_display_mode(
5784                         stream, &mode, &aconnector->base, con_state, NULL,
5785                         requested_bpc);
5786         else
5787                 fill_stream_properties_from_drm_display_mode(
5788                         stream, &mode, &aconnector->base, con_state, old_stream,
5789                         requested_bpc);
5790
5791 #if defined(CONFIG_DRM_AMD_DC_DCN)
5792         /* SST DSC determination policy */
5793         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5794         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5795                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5796 #endif
5797
5798         update_stream_scaling_settings(&mode, dm_state, stream);
5799
5800         fill_audio_info(
5801                 &stream->audio_info,
5802                 drm_connector,
5803                 sink);
5804
5805         update_stream_signal(stream, sink);
5806
5807         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5808                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5809
5810         if (stream->link->psr_settings.psr_feature_enabled) {
5811                 /*
5812                  * Should decide whether the stream supports VSC SDP colorimetry
5813                  * before building the VSC info packet.
5814                  */
5815                 stream->use_vsc_sdp_for_colorimetry = false;
5816                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5817                         stream->use_vsc_sdp_for_colorimetry =
5818                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5819                 } else {
5820                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5821                                 stream->use_vsc_sdp_for_colorimetry = true;
5822                 }
5823                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5824         }
5825 finish:
5826         dc_sink_release(sink);
5827
5828         return stream;
5829 }
5830
5831 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5832 {
5833         drm_crtc_cleanup(crtc);
5834         kfree(crtc);
5835 }
5836
5837 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5838                                   struct drm_crtc_state *state)
5839 {
5840         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5841
5842         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5843         if (cur->stream)
5844                 dc_stream_release(cur->stream);
5845
5846
5847         __drm_atomic_helper_crtc_destroy_state(state);
5848
5849
5850         kfree(state);
5851 }
5852
5853 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5854 {
5855         struct dm_crtc_state *state;
5856
5857         if (crtc->state)
5858                 dm_crtc_destroy_state(crtc, crtc->state);
5859
5860         state = kzalloc(sizeof(*state), GFP_KERNEL);
5861         if (WARN_ON(!state))
5862                 return;
5863
5864         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5865 }
5866
5867 static struct drm_crtc_state *
5868 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5869 {
5870         struct dm_crtc_state *state, *cur;
5871
5872         if (WARN_ON(!crtc->state))
5873                 return NULL;
5874
5875         cur = to_dm_crtc_state(crtc->state);
5876
5877         state = kzalloc(sizeof(*state), GFP_KERNEL);
5878         if (!state)
5879                 return NULL;
5880
5881         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5882
5883         if (cur->stream) {
5884                 state->stream = cur->stream;
5885                 dc_stream_retain(state->stream);
5886         }
5887
5888         state->active_planes = cur->active_planes;
5889         state->vrr_infopacket = cur->vrr_infopacket;
5890         state->abm_level = cur->abm_level;
5891         state->vrr_supported = cur->vrr_supported;
5892         state->freesync_config = cur->freesync_config;
5893         state->cm_has_degamma = cur->cm_has_degamma;
5894         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5895         /* TODO: Duplicate dc_stream once the stream object is flattened */
5896
5897         return &state->base;
5898 }
5899
5900 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5901 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5902 {
5903         crtc_debugfs_init(crtc);
5904
5905         return 0;
5906 }
5907 #endif
5908
5909 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5910 {
5911         enum dc_irq_source irq_source;
5912         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5913         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5914         int rc;
5915
5916         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5917
5918         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5919
5920         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5921                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5922         return rc;
5923 }
5924
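/*
 * Toggle the vblank interrupt, keeping the vupdate interrupt in sync with
 * it: vupdate is only needed while VRR is active. On DCN hardware the
 * request is also queued to the vblank workqueue (mall_work), presumably so
 * MALL/stutter state can be reprogrammed outside IRQ context.
 */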
5925 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5926 {
5927         enum dc_irq_source irq_source;
5928         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5929         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5930         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5931 #if defined(CONFIG_DRM_AMD_DC_DCN)
5932         struct amdgpu_display_manager *dm = &adev->dm;
5933         unsigned long flags;
5934 #endif
5935         int rc = 0;
5936
5937         if (enable) {
5938                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5939                 if (amdgpu_dm_vrr_active(acrtc_state))
5940                         rc = dm_set_vupdate_irq(crtc, true);
5941         } else {
5942                 /* vblank irq off -> vupdate irq off */
5943                 rc = dm_set_vupdate_irq(crtc, false);
5944         }
5945
5946         if (rc)
5947                 return rc;
5948
5949         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5950
5951         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5952                 return -EBUSY;
5953
5954         if (amdgpu_in_reset(adev))
5955                 return 0;
5956
5957 #if defined(CONFIG_DRM_AMD_DC_DCN)
5958         spin_lock_irqsave(&dm->vblank_lock, flags);
5959         dm->vblank_workqueue->dm = dm;
5960         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5961         dm->vblank_workqueue->enable = enable;
5962         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5963         schedule_work(&dm->vblank_workqueue->mall_work);
5964 #endif
5965
5966         return 0;
5967 }
5968
5969 static int dm_enable_vblank(struct drm_crtc *crtc)
5970 {
5971         return dm_set_vblank(crtc, true);
5972 }
5973
5974 static void dm_disable_vblank(struct drm_crtc *crtc)
5975 {
5976         dm_set_vblank(crtc, false);
5977 }
5978
5979 /* Only the options currently available for the driver are implemented */
5980 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5981         .reset = dm_crtc_reset_state,
5982         .destroy = amdgpu_dm_crtc_destroy,
5983         .set_config = drm_atomic_helper_set_config,
5984         .page_flip = drm_atomic_helper_page_flip,
5985         .atomic_duplicate_state = dm_crtc_duplicate_state,
5986         .atomic_destroy_state = dm_crtc_destroy_state,
5987         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5988         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5989         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5990         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5991         .enable_vblank = dm_enable_vblank,
5992         .disable_vblank = dm_disable_vblank,
5993         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5994 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5995         .late_register = amdgpu_dm_crtc_late_register,
5996 #endif
5997 };
5998
5999 static enum drm_connector_status
6000 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6001 {
6002         bool connected;
6003         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6004
6005         /*
6006          * Notes:
6007          * 1. This interface is NOT called in context of HPD irq.
6008          * 2. This interface *is called* in context of user-mode ioctl, which
6009          * makes it a bad place for *any* MST-related activity.
6010          */
6011
6012         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6013             !aconnector->fake_enable)
6014                 connected = (aconnector->dc_sink != NULL);
6015         else
6016                 connected = (aconnector->base.force == DRM_FORCE_ON);
6017
6018         update_subconnector_property(aconnector);
6019
6020         return (connected ? connector_status_connected :
6021                         connector_status_disconnected);
6022 }
6023
6024 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6025                                             struct drm_connector_state *connector_state,
6026                                             struct drm_property *property,
6027                                             uint64_t val)
6028 {
6029         struct drm_device *dev = connector->dev;
6030         struct amdgpu_device *adev = drm_to_adev(dev);
6031         struct dm_connector_state *dm_old_state =
6032                 to_dm_connector_state(connector->state);
6033         struct dm_connector_state *dm_new_state =
6034                 to_dm_connector_state(connector_state);
6035
6036         int ret = -EINVAL;
6037
6038         if (property == dev->mode_config.scaling_mode_property) {
6039                 enum amdgpu_rmx_type rmx_type;
6040
6041                 switch (val) {
6042                 case DRM_MODE_SCALE_CENTER:
6043                         rmx_type = RMX_CENTER;
6044                         break;
6045                 case DRM_MODE_SCALE_ASPECT:
6046                         rmx_type = RMX_ASPECT;
6047                         break;
6048                 case DRM_MODE_SCALE_FULLSCREEN:
6049                         rmx_type = RMX_FULL;
6050                         break;
6051                 case DRM_MODE_SCALE_NONE:
6052                 default:
6053                         rmx_type = RMX_OFF;
6054                         break;
6055                 }
6056
6057                 if (dm_old_state->scaling == rmx_type)
6058                         return 0;
6059
6060                 dm_new_state->scaling = rmx_type;
6061                 ret = 0;
6062         } else if (property == adev->mode_info.underscan_hborder_property) {
6063                 dm_new_state->underscan_hborder = val;
6064                 ret = 0;
6065         } else if (property == adev->mode_info.underscan_vborder_property) {
6066                 dm_new_state->underscan_vborder = val;
6067                 ret = 0;
6068         } else if (property == adev->mode_info.underscan_property) {
6069                 dm_new_state->underscan_enable = val;
6070                 ret = 0;
6071         } else if (property == adev->mode_info.abm_level_property) {
6072                 dm_new_state->abm_level = val;
6073                 ret = 0;
6074         }
6075
6076         return ret;
6077 }
6078
6079 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6080                                             const struct drm_connector_state *state,
6081                                             struct drm_property *property,
6082                                             uint64_t *val)
6083 {
6084         struct drm_device *dev = connector->dev;
6085         struct amdgpu_device *adev = drm_to_adev(dev);
6086         struct dm_connector_state *dm_state =
6087                 to_dm_connector_state(state);
6088         int ret = -EINVAL;
6089
6090         if (property == dev->mode_config.scaling_mode_property) {
6091                 switch (dm_state->scaling) {
6092                 case RMX_CENTER:
6093                         *val = DRM_MODE_SCALE_CENTER;
6094                         break;
6095                 case RMX_ASPECT:
6096                         *val = DRM_MODE_SCALE_ASPECT;
6097                         break;
6098                 case RMX_FULL:
6099                         *val = DRM_MODE_SCALE_FULLSCREEN;
6100                         break;
6101                 case RMX_OFF:
6102                 default:
6103                         *val = DRM_MODE_SCALE_NONE;
6104                         break;
6105                 }
6106                 ret = 0;
6107         } else if (property == adev->mode_info.underscan_hborder_property) {
6108                 *val = dm_state->underscan_hborder;
6109                 ret = 0;
6110         } else if (property == adev->mode_info.underscan_vborder_property) {
6111                 *val = dm_state->underscan_vborder;
6112                 ret = 0;
6113         } else if (property == adev->mode_info.underscan_property) {
6114                 *val = dm_state->underscan_enable;
6115                 ret = 0;
6116         } else if (property == adev->mode_info.abm_level_property) {
6117                 *val = dm_state->abm_level;
6118                 ret = 0;
6119         }
6120
6121         return ret;
6122 }
6123
6124 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6125 {
6126         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6127
6128         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6129 }
6130
6131 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6132 {
6133         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6134         const struct dc_link *link = aconnector->dc_link;
6135         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6136         struct amdgpu_display_manager *dm = &adev->dm;
6137
6138         /*
6139          * Call only if mst_mgr was initialized before, since it's not done
6140          * for all connector types.
6141          */
6142         if (aconnector->mst_mgr.dev)
6143                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6144
6145 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6146         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6147
6148         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6149             link->type != dc_connection_none &&
6150             dm->backlight_dev) {
6151                 backlight_device_unregister(dm->backlight_dev);
6152                 dm->backlight_dev = NULL;
6153         }
6154 #endif
6155
6156         if (aconnector->dc_em_sink)
6157                 dc_sink_release(aconnector->dc_em_sink);
6158         aconnector->dc_em_sink = NULL;
6159         if (aconnector->dc_sink)
6160                 dc_sink_release(aconnector->dc_sink);
6161         aconnector->dc_sink = NULL;
6162
6163         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6164         drm_connector_unregister(connector);
6165         drm_connector_cleanup(connector);
6166         if (aconnector->i2c) {
6167                 i2c_del_adapter(&aconnector->i2c->base);
6168                 kfree(aconnector->i2c);
6169         }
6170         kfree(aconnector->dm_dp_aux.aux.name);
6171
6172         kfree(connector);
6173 }
6174
6175 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6176 {
6177         struct dm_connector_state *state =
6178                 to_dm_connector_state(connector->state);
6179
6180         if (connector->state)
6181                 __drm_atomic_helper_connector_destroy_state(connector->state);
6182
6183         kfree(state);
6184
6185         state = kzalloc(sizeof(*state), GFP_KERNEL);
6186
6187         if (state) {
6188                 state->scaling = RMX_OFF;
6189                 state->underscan_enable = false;
6190                 state->underscan_hborder = 0;
6191                 state->underscan_vborder = 0;
6192                 state->base.max_requested_bpc = 8;
6193                 state->vcpi_slots = 0;
6194                 state->pbn = 0;
6195                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6196                         state->abm_level = amdgpu_dm_abm_level;
6197
6198                 __drm_atomic_helper_connector_reset(connector, &state->base);
6199         }
6200 }
6201
6202 struct drm_connector_state *
6203 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6204 {
6205         struct dm_connector_state *state =
6206                 to_dm_connector_state(connector->state);
6207
6208         struct dm_connector_state *new_state =
6209                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6210
6211         if (!new_state)
6212                 return NULL;
6213
6214         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6215
6216         new_state->freesync_capable = state->freesync_capable;
6217         new_state->abm_level = state->abm_level;
6218         new_state->scaling = state->scaling;
6219         new_state->underscan_enable = state->underscan_enable;
6220         new_state->underscan_hborder = state->underscan_hborder;
6221         new_state->underscan_vborder = state->underscan_vborder;
6222         new_state->vcpi_slots = state->vcpi_slots;
6223         new_state->pbn = state->pbn;
6224         return &new_state->base;
6225 }
6226
6227 static int
6228 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6229 {
6230         struct amdgpu_dm_connector *amdgpu_dm_connector =
6231                 to_amdgpu_dm_connector(connector);
6232         int r;
6233
6234         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6235             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6236                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6237                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6238                 if (r)
6239                         return r;
6240         }
6241
6242 #if defined(CONFIG_DEBUG_FS)
6243         connector_debugfs_init(amdgpu_dm_connector);
6244 #endif
6245
6246         return 0;
6247 }
6248
6249 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6250         .reset = amdgpu_dm_connector_funcs_reset,
6251         .detect = amdgpu_dm_connector_detect,
6252         .fill_modes = drm_helper_probe_single_connector_modes,
6253         .destroy = amdgpu_dm_connector_destroy,
6254         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6255         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6256         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6257         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6258         .late_register = amdgpu_dm_connector_late_register,
6259         .early_unregister = amdgpu_dm_connector_unregister
6260 };
6261
6262 static int get_modes(struct drm_connector *connector)
6263 {
6264         return amdgpu_dm_connector_get_modes(connector);
6265 }
6266
6267 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6268 {
6269         struct dc_sink_init_data init_params = {
6270                         .link = aconnector->dc_link,
6271                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6272         };
6273         struct edid *edid;
6274
6275         if (!aconnector->base.edid_blob_ptr) {
6276                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6277                                 aconnector->base.name);
6278
6279                 aconnector->base.force = DRM_FORCE_OFF;
6280                 aconnector->base.override_edid = false;
6281                 return;
6282         }
6283
6284         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6285
6286         aconnector->edid = edid;
6287
6288         aconnector->dc_em_sink = dc_link_add_remote_sink(
6289                 aconnector->dc_link,
6290                 (uint8_t *)edid,
6291                 (edid->extensions + 1) * EDID_LENGTH,
6292                 &init_params);
6293
6294         if (aconnector->base.force == DRM_FORCE_ON) {
6295                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6296                 aconnector->dc_link->local_sink :
6297                 aconnector->dc_em_sink;
6298                 dc_sink_retain(aconnector->dc_sink);
6299         }
6300 }
6301
6302 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6303 {
6304         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6305
6306         /*
6307          * In case of headless boot with force on for DP managed connector,
6308          * those settings have to be != 0 to get an initial modeset.
6309          */
6310         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6311                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6312                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6313         }
6314
6315
6316         aconnector->base.override_edid = true;
6317         create_eml_sink(aconnector);
6318 }
6319
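/*
 * Validation retry loop: starting from the connector's max_requested_bpc
 * (8 by default), lower the bpc in steps of 2 down to 6 until
 * dc_validate_stream() accepts the stream. On an encoder validation
 * failure, retry once more with YCbCr 4:2:0 output forced.
 */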
6320 static struct dc_stream_state *
6321 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6322                                 const struct drm_display_mode *drm_mode,
6323                                 const struct dm_connector_state *dm_state,
6324                                 const struct dc_stream_state *old_stream)
6325 {
6326         struct drm_connector *connector = &aconnector->base;
6327         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6328         struct dc_stream_state *stream;
6329         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6330         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6331         enum dc_status dc_result = DC_OK;
6332
6333         do {
6334                 stream = create_stream_for_sink(aconnector, drm_mode,
6335                                                 dm_state, old_stream,
6336                                                 requested_bpc);
6337                 if (stream == NULL) {
6338                         DRM_ERROR("Failed to create stream for sink!\n");
6339                         break;
6340                 }
6341
6342                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6343
6344                 if (dc_result != DC_OK) {
6345                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6346                                       drm_mode->hdisplay,
6347                                       drm_mode->vdisplay,
6348                                       drm_mode->clock,
6349                                       dc_result,
6350                                       dc_status_to_str(dc_result));
6351
6352                         dc_stream_release(stream);
6353                         stream = NULL;
6354                         requested_bpc -= 2; /* lower bpc to retry validation */
6355                 }
6356
6357         } while (stream == NULL && requested_bpc >= 6);
6358
6359         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6360                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6361
6362                 aconnector->force_yuv420_output = true;
6363                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6364                                                 dm_state, old_stream);
6365                 aconnector->force_yuv420_output = false;
6366         }
6367
6368         return stream;
6369 }
6370
6371 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6372                                    struct drm_display_mode *mode)
6373 {
6374         int result = MODE_ERROR;
6375         struct dc_sink *dc_sink;
6376         /* TODO: Unhardcode stream count */
6377         struct dc_stream_state *stream;
6378         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6379
6380         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6381                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6382                 return result;
6383
6384         /*
6385          * Only run this the first time mode_valid is called to initialize
6386          * EDID mgmt
6387          */
6388         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6389                 !aconnector->dc_em_sink)
6390                 handle_edid_mgmt(aconnector);
6391
6392         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6393
6394         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6395                                 aconnector->base.force != DRM_FORCE_ON) {
6396                 DRM_ERROR("dc_sink is NULL!\n");
6397                 goto fail;
6398         }
6399
6400         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6401         if (stream) {
6402                 dc_stream_release(stream);
6403                 result = MODE_OK;
6404         }
6405
6406 fail:
6407         /* TODO: error handling */
6408         return result;
6409 }
6410
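/*
 * Pack the connector's HDR static metadata into a Dynamic Range and
 * Mastering InfoFrame (type 0x87, 26-byte payload). For HDMI the 4-byte
 * infoframe header is consumed here; for DP the payload is wrapped in an
 * SDP with its own header. With no metadata set, a zeroed packet with
 * valid == false is returned.
 */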
6411 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6412                                 struct dc_info_packet *out)
6413 {
6414         struct hdmi_drm_infoframe frame;
6415         unsigned char buf[30]; /* 26 + 4 */
6416         ssize_t len;
6417         int ret, i;
6418
6419         memset(out, 0, sizeof(*out));
6420
6421         if (!state->hdr_output_metadata)
6422                 return 0;
6423
6424         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6425         if (ret)
6426                 return ret;
6427
6428         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6429         if (len < 0)
6430                 return (int)len;
6431
6432         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6433         if (len != 30)
6434                 return -EINVAL;
6435
6436         /* Prepare the infopacket for DC. */
6437         switch (state->connector->connector_type) {
6438         case DRM_MODE_CONNECTOR_HDMIA:
6439                 out->hb0 = 0x87; /* type */
6440                 out->hb1 = 0x01; /* version */
6441                 out->hb2 = 0x1A; /* length */
6442                 out->sb[0] = buf[3]; /* checksum */
6443                 i = 1;
6444                 break;
6445
6446         case DRM_MODE_CONNECTOR_DisplayPort:
6447         case DRM_MODE_CONNECTOR_eDP:
6448                 out->hb0 = 0x00; /* sdp id, zero */
6449                 out->hb1 = 0x87; /* type */
6450                 out->hb2 = 0x1D; /* payload len - 1 */
6451                 out->hb3 = (0x13 << 2); /* sdp version */
6452                 out->sb[0] = 0x01; /* version */
6453                 out->sb[1] = 0x1A; /* length */
6454                 i = 2;
6455                 break;
6456
6457         default:
6458                 return -EINVAL;
6459         }
6460
6461         memcpy(&out->sb[i], &buf[4], 26);
6462         out->valid = true;
6463
6464         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6465                        sizeof(out->sb), false);
6466
6467         return 0;
6468 }
6469
6470 static int
6471 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6472                                  struct drm_atomic_state *state)
6473 {
6474         struct drm_connector_state *new_con_state =
6475                 drm_atomic_get_new_connector_state(state, conn);
6476         struct drm_connector_state *old_con_state =
6477                 drm_atomic_get_old_connector_state(state, conn);
6478         struct drm_crtc *crtc = new_con_state->crtc;
6479         struct drm_crtc_state *new_crtc_state;
6480         int ret;
6481
6482         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6483
6484         if (!crtc)
6485                 return 0;
6486
6487         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6488                 struct dc_info_packet hdr_infopacket;
6489
6490                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6491                 if (ret)
6492                         return ret;
6493
6494                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6495                 if (IS_ERR(new_crtc_state))
6496                         return PTR_ERR(new_crtc_state);
6497
6498                 /*
6499                  * DC considers the stream backends changed if the
6500                  * static metadata changes. Forcing the modeset also
6501                  * gives a simple way for userspace to switch from
6502                  * 8bpc to 10bpc when setting the metadata to enter
6503                  * or exit HDR.
6504                  *
6505                  * Changing the static metadata after it's been
6506                  * set is permissible, however. So only force a
6507                  * modeset if we're entering or exiting HDR.
6508                  */
6509                 new_crtc_state->mode_changed =
6510                         !old_con_state->hdr_output_metadata ||
6511                         !new_con_state->hdr_output_metadata;
6512         }
6513
6514         return 0;
6515 }
6516
6517 static const struct drm_connector_helper_funcs
6518 amdgpu_dm_connector_helper_funcs = {
6519         /*
6520          * If a second, larger display is hotplugged in FB console mode, its
6521          * higher-resolution modes will be filtered out by drm_mode_validate_size()
6522          * and will be missing after the user starts lightdm. So we need to renew
6523          * the mode list in the get_modes callback, not just return the mode count.
6524          */
6525         .get_modes = get_modes,
6526         .mode_valid = amdgpu_dm_connector_mode_valid,
6527         .atomic_check = amdgpu_dm_connector_atomic_check,
6528 };
6529
6530 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6531 {
6532 }
6533
6534 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6535 {
6536         struct drm_atomic_state *state = new_crtc_state->state;
6537         struct drm_plane *plane;
6538         int num_active = 0;
6539
6540         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6541                 struct drm_plane_state *new_plane_state;
6542
6543                 /* Cursor planes are "fake". */
6544                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6545                         continue;
6546
6547                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6548
6549                 if (!new_plane_state) {
6550                         /*
6551                          * The plane is enabled on the CRTC and hasn't changed
6552                          * state. This means it previously passed validation
6553                          * and is therefore enabled.
6554                          */
6555                         num_active += 1;
6556                         continue;
6557                 }
6558
6559                 /* We need a framebuffer to be considered enabled. */
6560                 num_active += (new_plane_state->fb != NULL);
6561         }
6562
6563         return num_active;
6564 }
6565
6566 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6567                                          struct drm_crtc_state *new_crtc_state)
6568 {
6569         struct dm_crtc_state *dm_new_crtc_state =
6570                 to_dm_crtc_state(new_crtc_state);
6571
6572         dm_new_crtc_state->active_planes = 0;
6573
6574         if (!dm_new_crtc_state->stream)
6575                 return;
6576
6577         dm_new_crtc_state->active_planes =
6578                 count_crtc_active_planes(new_crtc_state);
6579 }
6580
6581 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6582                                        struct drm_atomic_state *state)
6583 {
6584         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6585                                                                           crtc);
6586         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6587         struct dc *dc = adev->dm.dc;
6588         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6589         int ret = -EINVAL;
6590
6591         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6592
6593         dm_update_crtc_active_planes(crtc, crtc_state);
6594
6595         if (unlikely(!dm_crtc_state->stream &&
6596                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6597                 WARN_ON(1);
6598                 return ret;
6599         }
6600
6601         /*
6602          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6603          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6604          * planes are disabled, which is not supported by the hardware. And there is legacy
6605          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6606          */
6607         if (crtc_state->enable &&
6608             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6609                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6610                 return -EINVAL;
6611         }
6612
6613         /* In some use cases, like reset, no stream is attached */
6614         if (!dm_crtc_state->stream)
6615                 return 0;
6616
6617         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6618                 return 0;
6619
6620         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6621         return ret;
6622 }
6623
6624 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6625                                       const struct drm_display_mode *mode,
6626                                       struct drm_display_mode *adjusted_mode)
6627 {
6628         return true;
6629 }
6630
6631 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6632         .disable = dm_crtc_helper_disable,
6633         .atomic_check = dm_crtc_helper_atomic_check,
6634         .mode_fixup = dm_crtc_helper_mode_fixup,
6635         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6636 };
6637
6638 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6639 {
6640
6641 }
6642
6643 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6644 {
6645         switch (display_color_depth) {
6646         case COLOR_DEPTH_666:
6647                 return 6;
6648         case COLOR_DEPTH_888:
6649                 return 8;
6650         case COLOR_DEPTH_101010:
6651                 return 10;
6652         case COLOR_DEPTH_121212:
6653                 return 12;
6654         case COLOR_DEPTH_141414:
6655                 return 14;
6656         case COLOR_DEPTH_161616:
6657                 return 16;
6658         default:
6659                 break;
6660         }
6661         return 0;
6662 }
6663
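/*
 * MST-only encoder check: convert the adjusted mode's pixel clock and
 * effective bpp (3 x bpc) into a PBN value via drm_dp_calc_pbn_mode() and
 * reserve VCPI slots through the MST manager. Non-MST connectors (no port)
 * return early.
 */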
6664 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6665                                           struct drm_crtc_state *crtc_state,
6666                                           struct drm_connector_state *conn_state)
6667 {
6668         struct drm_atomic_state *state = crtc_state->state;
6669         struct drm_connector *connector = conn_state->connector;
6670         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6671         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6672         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6673         struct drm_dp_mst_topology_mgr *mst_mgr;
6674         struct drm_dp_mst_port *mst_port;
6675         enum dc_color_depth color_depth;
6676         int clock, bpp = 0;
6677         bool is_y420 = false;
6678
6679         if (!aconnector->port || !aconnector->dc_sink)
6680                 return 0;
6681
6682         mst_port = aconnector->port;
6683         mst_mgr = &aconnector->mst_port->mst_mgr;
6684
6685         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6686                 return 0;
6687
6688         if (!state->duplicated) {
6689                 int max_bpc = conn_state->max_requested_bpc;
6690                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6691                                 aconnector->force_yuv420_output;
6692                 color_depth = convert_color_depth_from_display_info(connector,
6693                                                                     is_y420,
6694                                                                     max_bpc);
6695                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6696                 clock = adjusted_mode->clock;
6697                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6698         }
6699         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6700                                                                            mst_mgr,
6701                                                                            mst_port,
6702                                                                            dm_new_connector_state->pbn,
6703                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6704         if (dm_new_connector_state->vcpi_slots < 0) {
6705                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6706                 return dm_new_connector_state->vcpi_slots;
6707         }
6708         return 0;
6709 }
6710
6711 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6712         .disable = dm_encoder_helper_disable,
6713         .atomic_check = dm_encoder_helper_atomic_check
6714 };
6715
6716 #if defined(CONFIG_DRM_AMD_DC_DCN)
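/*
 * After DSC has been decided per stream, recompute PBN/VCPI for every MST
 * connector in the state. DSC streams pass the compressed bits-per-pixel
 * (in 1/16 bpp units, hence the 'true' DSC argument to
 * drm_dp_calc_pbn_mode()); non-DSC streams just refresh their allocation.
 */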
6717 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6718                                             struct dc_state *dc_state)
6719 {
6720         struct dc_stream_state *stream = NULL;
6721         struct drm_connector *connector;
6722         struct drm_connector_state *new_con_state;
6723         struct amdgpu_dm_connector *aconnector;
6724         struct dm_connector_state *dm_conn_state;
6725         int i, j, clock, bpp;
6726         int vcpi, pbn_div, pbn = 0;
6727
6728         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6729
6730                 aconnector = to_amdgpu_dm_connector(connector);
6731
6732                 if (!aconnector->port)
6733                         continue;
6734
6735                 if (!new_con_state || !new_con_state->crtc)
6736                         continue;
6737
6738                 dm_conn_state = to_dm_connector_state(new_con_state);
6739
6740                 for (j = 0; j < dc_state->stream_count; j++) {
6741                         stream = dc_state->streams[j];
6742                         if (!stream)
6743                                 continue;
6744
6745                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6746                                 break;
6747
6748                         stream = NULL;
6749                 }
6750
6751                 if (!stream)
6752                         continue;
6753
6754                 if (stream->timing.flags.DSC != 1) {
6755                         drm_dp_mst_atomic_enable_dsc(state,
6756                                                      aconnector->port,
6757                                                      dm_conn_state->pbn,
6758                                                      0,
6759                                                      false);
6760                         continue;
6761                 }
6762
6763                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6764                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6765                 clock = stream->timing.pix_clk_100hz / 10;
6766                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6767                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6768                                                     aconnector->port,
6769                                                     pbn, pbn_div,
6770                                                     true);
6771                 if (vcpi < 0)
6772                         return vcpi;
6773
6774                 dm_conn_state->pbn = pbn;
6775                 dm_conn_state->vcpi_slots = vcpi;
6776         }
6777         return 0;
6778 }
6779 #endif
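
/*
 * Note on the DSC path above: dsc_cfg.bits_per_pixel is the DSC target
 * rate in 1/16-bpp units (e.g. 128 means 8 bpp), which is why
 * drm_dp_calc_pbn_mode() is called with its dsc argument set to true so
 * that it scales the value back down when computing the PBN.
 */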
6780
6781 static void dm_drm_plane_reset(struct drm_plane *plane)
6782 {
6783         struct dm_plane_state *amdgpu_state = NULL;
6784
6785         if (plane->state)
6786                 plane->funcs->atomic_destroy_state(plane, plane->state);
6787
6788         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6789         WARN_ON(amdgpu_state == NULL);
6790
6791         if (amdgpu_state)
6792                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6793 }
6794
6795 static struct drm_plane_state *
6796 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6797 {
6798         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6799
6800         old_dm_plane_state = to_dm_plane_state(plane->state);
6801         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6802         if (!dm_plane_state)
6803                 return NULL;
6804
6805         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6806
6807         if (old_dm_plane_state->dc_state) {
6808                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6809                 dc_plane_state_retain(dm_plane_state->dc_state);
6810         }
6811
6812         return &dm_plane_state->base;
6813 }
6814
6815 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6816                                 struct drm_plane_state *state)
6817 {
6818         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6819
6820         if (dm_plane_state->dc_state)
6821                 dc_plane_state_release(dm_plane_state->dc_state);
6822
6823         drm_atomic_helper_plane_destroy_state(plane, state);
6824 }
6825
6826 static const struct drm_plane_funcs dm_plane_funcs = {
6827         .update_plane   = drm_atomic_helper_update_plane,
6828         .disable_plane  = drm_atomic_helper_disable_plane,
6829         .destroy        = drm_primary_helper_destroy,
6830         .reset = dm_drm_plane_reset,
6831         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6832         .atomic_destroy_state = dm_drm_plane_destroy_state,
6833         .format_mod_supported = dm_plane_format_mod_supported,
6834 };
6835
6836 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6837                                       struct drm_plane_state *new_state)
6838 {
6839         struct amdgpu_framebuffer *afb;
6840         struct drm_gem_object *obj;
6841         struct amdgpu_device *adev;
6842         struct amdgpu_bo *rbo;
6843         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6844         struct list_head list;
6845         struct ttm_validate_buffer tv;
6846         struct ww_acquire_ctx ticket;
6847         uint32_t domain;
6848         int r;
6849
6850         if (!new_state->fb) {
6851                 DRM_DEBUG_KMS("No FB bound\n");
6852                 return 0;
6853         }
6854
6855         afb = to_amdgpu_framebuffer(new_state->fb);
6856         obj = new_state->fb->obj[0];
6857         rbo = gem_to_amdgpu_bo(obj);
6858         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6859         INIT_LIST_HEAD(&list);
6860
6861         tv.bo = &rbo->tbo;
6862         tv.num_shared = 1;
6863         list_add(&tv.head, &list);
6864
6865         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6866         if (r) {
6867                 dev_err(adev->dev, "failed to reserve buffer object (%d)\n", r);
6868                 return r;
6869         }
6870
6871         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6872                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6873         else
6874                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6875
6876         r = amdgpu_bo_pin(rbo, domain);
6877         if (unlikely(r != 0)) {
6878                 if (r != -ERESTARTSYS)
6879                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6880                 ttm_eu_backoff_reservation(&ticket, &list);
6881                 return r;
6882         }
6883
6884         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6885         if (unlikely(r != 0)) {
6886                 amdgpu_bo_unpin(rbo);
6887                 ttm_eu_backoff_reservation(&ticket, &list);
6888                 DRM_ERROR("%p: GART bind failed (%d)\n", rbo, r);
6889                 return r;
6890         }
6891
6892         ttm_eu_backoff_reservation(&ticket, &list);
6893
6894         afb->address = amdgpu_bo_gpu_offset(rbo);
6895
6896         amdgpu_bo_ref(rbo);
6897
6898         /*
6899          * We don't do surface updates on planes that have been newly created,
6900          * but we also don't have the afb->address during atomic check.
6901          *
6902          * Fill in buffer attributes depending on the address here, but only on
6903          * newly created planes since they're not being used by DC yet and this
6904          * won't modify global state.
6905          */
6906         dm_plane_state_old = to_dm_plane_state(plane->state);
6907         dm_plane_state_new = to_dm_plane_state(new_state);
6908
6909         if (dm_plane_state_new->dc_state &&
6910             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6911                 struct dc_plane_state *plane_state =
6912                         dm_plane_state_new->dc_state;
6913                 bool force_disable_dcc = !plane_state->dcc.enable;
6914
6915                 fill_plane_buffer_attributes(
6916                         adev, afb, plane_state->format, plane_state->rotation,
6917                         afb->tiling_flags,
6918                         &plane_state->tiling_info, &plane_state->plane_size,
6919                         &plane_state->dcc, &plane_state->address,
6920                         afb->tmz_surface, force_disable_dcc);
6921         }
6922
6923         return 0;
6924 }
6925
6926 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6927                                        struct drm_plane_state *old_state)
6928 {
6929         struct amdgpu_bo *rbo;
6930         int r;
6931
6932         if (!old_state->fb)
6933                 return;
6934
6935         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6936         r = amdgpu_bo_reserve(rbo, false);
6937         if (unlikely(r)) {
6938                 DRM_ERROR("failed to reserve rbo before unpin\n");
6939                 return;
6940         }
6941
6942         amdgpu_bo_unpin(rbo);
6943         amdgpu_bo_unreserve(rbo);
6944         amdgpu_bo_unref(&rbo);
6945 }
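
/*
 * Note: the unpin/unref sequence above undoes the amdgpu_bo_pin() and
 * amdgpu_bo_ref() taken in dm_plane_helper_prepare_fb(); the two helpers
 * must stay symmetric or the BO is leaked or unpinned twice.
 */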
6946
6947 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6948                                        struct drm_crtc_state *new_crtc_state)
6949 {
6950         struct drm_framebuffer *fb = state->fb;
6951         int min_downscale, max_upscale;
6952         int min_scale = 0;
6953         int max_scale = INT_MAX;
6954
6955         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6956         if (fb && state->crtc) {
6957                 /* Validate viewport to cover the case when only the position changes */
6958                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6959                         int viewport_width = state->crtc_w;
6960                         int viewport_height = state->crtc_h;
6961
6962                         if (state->crtc_x < 0)
6963                                 viewport_width += state->crtc_x;
6964                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6965                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6966
6967                         if (state->crtc_y < 0)
6968                                 viewport_height += state->crtc_y;
6969                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6970                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6971
6972                         if (viewport_width < 0 || viewport_height < 0) {
6973                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6974                                 return -EINVAL;
6975                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6976                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6977                                 return -EINVAL;
6978                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6979                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6980                                 return -EINVAL;
6981                         }
6982
6983                 }
6984
6985                 /* Get min/max allowed scaling factors from plane caps. */
6986                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6987                                              &min_downscale, &max_upscale);
6988                 /*
6989                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6990                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6991                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6992                  */
6993                 min_scale = (1000 << 16) / max_upscale;
6994                 max_scale = (1000 << 16) / min_downscale;
6995         }
6996
6997         return drm_atomic_helper_check_plane_state(
6998                 state, new_crtc_state, min_scale, max_scale, true, true);
6999 }
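
/*
 * Worked example for the fixed-point conversion above (the cap values are
 * illustrative, not taken from real plane caps): if DC reports
 * max_upscale = 16000 (16x, since 1.0 == 1000) and min_downscale = 250
 * (0.25x), then
 *
 *   min_scale = (1000 << 16) / 16000 =   4096  (1/16 in 16.16)
 *   max_scale = (1000 << 16) /   250 = 262144  (4.0  in 16.16)
 *
 * i.e. DRM's src/dst limits are the reciprocals of DC's dst/src caps.
 */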
7000
7001 static int dm_plane_atomic_check(struct drm_plane *plane,
7002                                  struct drm_atomic_state *state)
7003 {
7004         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7005                                                                                  plane);
7006         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7007         struct dc *dc = adev->dm.dc;
7008         struct dm_plane_state *dm_plane_state;
7009         struct dc_scaling_info scaling_info;
7010         struct drm_crtc_state *new_crtc_state;
7011         int ret;
7012
7013         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7014
7015         dm_plane_state = to_dm_plane_state(new_plane_state);
7016
7017         if (!dm_plane_state->dc_state)
7018                 return 0;
7019
7020         new_crtc_state =
7021                 drm_atomic_get_new_crtc_state(state,
7022                                               new_plane_state->crtc);
7023         if (!new_crtc_state)
7024                 return -EINVAL;
7025
7026         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7027         if (ret)
7028                 return ret;
7029
7030         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7031         if (ret)
7032                 return ret;
7033
7034         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7035                 return 0;
7036
7037         return -EINVAL;
7038 }
7039
7040 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7041                                        struct drm_atomic_state *state)
7042 {
7043         /* Only support async updates on cursor planes. */
7044         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7045                 return -EINVAL;
7046
7047         return 0;
7048 }
7049
7050 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7051                                          struct drm_atomic_state *state)
7052 {
7053         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7054                                                                            plane);
7055         struct drm_plane_state *old_state =
7056                 drm_atomic_get_old_plane_state(state, plane);
7057
7058         trace_amdgpu_dm_atomic_update_cursor(new_state);
7059
7060         swap(plane->state->fb, new_state->fb);
7061
7062         plane->state->src_x = new_state->src_x;
7063         plane->state->src_y = new_state->src_y;
7064         plane->state->src_w = new_state->src_w;
7065         plane->state->src_h = new_state->src_h;
7066         plane->state->crtc_x = new_state->crtc_x;
7067         plane->state->crtc_y = new_state->crtc_y;
7068         plane->state->crtc_w = new_state->crtc_w;
7069         plane->state->crtc_h = new_state->crtc_h;
7070
7071         handle_cursor_update(plane, old_state);
7072 }
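
/*
 * Note: the async path above deliberately copies only the fb pointer and
 * the src/crtc rectangles into the current plane state rather than doing
 * a full state swap, so legacy cursor ioctls can update the cursor
 * without waiting for a complete atomic commit.
 */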
7073
7074 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7075         .prepare_fb = dm_plane_helper_prepare_fb,
7076         .cleanup_fb = dm_plane_helper_cleanup_fb,
7077         .atomic_check = dm_plane_atomic_check,
7078         .atomic_async_check = dm_plane_atomic_async_check,
7079         .atomic_async_update = dm_plane_atomic_async_update
7080 };
7081
7082 /*
7083  * TODO: these are currently initialized to RGB formats only.
7084  * For future use cases we should either initialize them dynamically based on
7085  * plane capabilities, or initialize this array to all formats, so the internal
7086  * DRM check will succeed, and let DC implement the proper check.
7087  */
7088 static const uint32_t rgb_formats[] = {
7089         DRM_FORMAT_XRGB8888,
7090         DRM_FORMAT_ARGB8888,
7091         DRM_FORMAT_RGBA8888,
7092         DRM_FORMAT_XRGB2101010,
7093         DRM_FORMAT_XBGR2101010,
7094         DRM_FORMAT_ARGB2101010,
7095         DRM_FORMAT_ABGR2101010,
7096         DRM_FORMAT_XBGR8888,
7097         DRM_FORMAT_ABGR8888,
7098         DRM_FORMAT_RGB565,
7099 };
7100
7101 static const uint32_t overlay_formats[] = {
7102         DRM_FORMAT_XRGB8888,
7103         DRM_FORMAT_ARGB8888,
7104         DRM_FORMAT_RGBA8888,
7105         DRM_FORMAT_XBGR8888,
7106         DRM_FORMAT_ABGR8888,
7107         DRM_FORMAT_RGB565
7108 };
7109
7110 static const u32 cursor_formats[] = {
7111         DRM_FORMAT_ARGB8888
7112 };
7113
7114 static int get_plane_formats(const struct drm_plane *plane,
7115                              const struct dc_plane_cap *plane_cap,
7116                              uint32_t *formats, int max_formats)
7117 {
7118         int i, num_formats = 0;
7119
7120         /*
7121          * TODO: Query support for each group of formats directly from
7122          * DC plane caps. This will require adding more formats to the
7123          * caps list.
7124          */
7125
7126         switch (plane->type) {
7127         case DRM_PLANE_TYPE_PRIMARY:
7128                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7129                         if (num_formats >= max_formats)
7130                                 break;
7131
7132                         formats[num_formats++] = rgb_formats[i];
7133                 }
7134
7135                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7136                         formats[num_formats++] = DRM_FORMAT_NV12;
7137                 if (plane_cap && plane_cap->pixel_format_support.p010)
7138                         formats[num_formats++] = DRM_FORMAT_P010;
7139                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7140                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7141                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7142                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7143                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7144                 }
7145                 break;
7146
7147         case DRM_PLANE_TYPE_OVERLAY:
7148                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7149                         if (num_formats >= max_formats)
7150                                 break;
7151
7152                         formats[num_formats++] = overlay_formats[i];
7153                 }
7154                 break;
7155
7156         case DRM_PLANE_TYPE_CURSOR:
7157                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7158                         if (num_formats >= max_formats)
7159                                 break;
7160
7161                         formats[num_formats++] = cursor_formats[i];
7162                 }
7163                 break;
7164         }
7165
7166         return num_formats;
7167 }
7168
7169 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7170                                 struct drm_plane *plane,
7171                                 unsigned long possible_crtcs,
7172                                 const struct dc_plane_cap *plane_cap)
7173 {
7174         uint32_t formats[32];
7175         int num_formats;
7176         int res = -EPERM;
7177         unsigned int supported_rotations;
7178         uint64_t *modifiers = NULL;
7179
7180         num_formats = get_plane_formats(plane, plane_cap, formats,
7181                                         ARRAY_SIZE(formats));
7182
7183         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7184         if (res)
7185                 return res;
7186
7187         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7188                                        &dm_plane_funcs, formats, num_formats,
7189                                        modifiers, plane->type, NULL);
7190         kfree(modifiers);
7191         if (res)
7192                 return res;
7193
7194         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7195             plane_cap && plane_cap->per_pixel_alpha) {
7196                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7197                                           BIT(DRM_MODE_BLEND_PREMULTI);
7198
7199                 drm_plane_create_alpha_property(plane);
7200                 drm_plane_create_blend_mode_property(plane, blend_caps);
7201         }
7202
7203         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7204             plane_cap &&
7205             (plane_cap->pixel_format_support.nv12 ||
7206              plane_cap->pixel_format_support.p010)) {
7207                 /* This only affects YUV formats. */
7208                 drm_plane_create_color_properties(
7209                         plane,
7210                         BIT(DRM_COLOR_YCBCR_BT601) |
7211                         BIT(DRM_COLOR_YCBCR_BT709) |
7212                         BIT(DRM_COLOR_YCBCR_BT2020),
7213                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7214                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7215                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7216         }
7217
7218         supported_rotations =
7219                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7220                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7221
7222         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7223             plane->type != DRM_PLANE_TYPE_CURSOR)
7224                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7225                                                    supported_rotations);
7226
7227         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7228
7229         /* Create (reset) the plane state */
7230         if (plane->funcs->reset)
7231                 plane->funcs->reset(plane);
7232
7233         return 0;
7234 }
7235
7236 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7237                                struct drm_plane *plane,
7238                                uint32_t crtc_index)
7239 {
7240         struct amdgpu_crtc *acrtc = NULL;
7241         struct drm_plane *cursor_plane;
7242
7243         int res = -ENOMEM;
7244
7245         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7246         if (!cursor_plane)
7247                 goto fail;
7248
7249         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7250         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;
7251
7252         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7253         if (!acrtc)
7254                 goto fail;
7255
7256         res = drm_crtc_init_with_planes(
7257                         dm->ddev,
7258                         &acrtc->base,
7259                         plane,
7260                         cursor_plane,
7261                         &amdgpu_dm_crtc_funcs, NULL);
7262
7263         if (res)
7264                 goto fail;
7265
7266         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7267
7268         /* Create (reset) the CRTC state */
7269         if (acrtc->base.funcs->reset)
7270                 acrtc->base.funcs->reset(&acrtc->base);
7271
7272         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7273         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7274
7275         acrtc->crtc_id = crtc_index;
7276         acrtc->base.enabled = false;
7277         acrtc->otg_inst = -1;
7278
7279         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7280         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7281                                    true, MAX_COLOR_LUT_ENTRIES);
7282         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7283
7284         return 0;
7285
7286 fail:
7287         kfree(acrtc);
7288         kfree(cursor_plane);
7289         return res;
7290 }
7291
7292
7293 static int to_drm_connector_type(enum signal_type st)
7294 {
7295         switch (st) {
7296         case SIGNAL_TYPE_HDMI_TYPE_A:
7297                 return DRM_MODE_CONNECTOR_HDMIA;
7298         case SIGNAL_TYPE_EDP:
7299                 return DRM_MODE_CONNECTOR_eDP;
7300         case SIGNAL_TYPE_LVDS:
7301                 return DRM_MODE_CONNECTOR_LVDS;
7302         case SIGNAL_TYPE_RGB:
7303                 return DRM_MODE_CONNECTOR_VGA;
7304         case SIGNAL_TYPE_DISPLAY_PORT:
7305         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7306                 return DRM_MODE_CONNECTOR_DisplayPort;
7307         case SIGNAL_TYPE_DVI_DUAL_LINK:
7308         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7309                 return DRM_MODE_CONNECTOR_DVID;
7310         case SIGNAL_TYPE_VIRTUAL:
7311                 return DRM_MODE_CONNECTOR_VIRTUAL;
7312
7313         default:
7314                 return DRM_MODE_CONNECTOR_Unknown;
7315         }
7316 }
7317
7318 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7319 {
7320         struct drm_encoder *encoder;
7321
7322         /* There is only one encoder per connector */
7323         drm_connector_for_each_possible_encoder(connector, encoder)
7324                 return encoder;
7325
7326         return NULL;
7327 }
7328
7329 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7330 {
7331         struct drm_encoder *encoder;
7332         struct amdgpu_encoder *amdgpu_encoder;
7333
7334         encoder = amdgpu_dm_connector_to_encoder(connector);
7335
7336         if (encoder == NULL)
7337                 return;
7338
7339         amdgpu_encoder = to_amdgpu_encoder(encoder);
7340
7341         amdgpu_encoder->native_mode.clock = 0;
7342
7343         if (!list_empty(&connector->probed_modes)) {
7344                 struct drm_display_mode *preferred_mode = NULL;
7345
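                /*
                 * Only the first entry needs to be examined: the caller
                 * sorts the probed list with drm_mode_sort(), which ranks
                 * preferred modes (and then higher resolutions) first.
                 */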
7346                 list_for_each_entry(preferred_mode,
7347                                     &connector->probed_modes,
7348                                     head) {
7349                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7350                                 amdgpu_encoder->native_mode = *preferred_mode;
7351
7352                         break;
7353                 }
7354
7355         }
7356 }
7357
7358 static struct drm_display_mode *
7359 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7360                              char *name,
7361                              int hdisplay, int vdisplay)
7362 {
7363         struct drm_device *dev = encoder->dev;
7364         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7365         struct drm_display_mode *mode = NULL;
7366         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7367
7368         mode = drm_mode_duplicate(dev, native_mode);
7369
7370         if (mode == NULL)
7371                 return NULL;
7372
7373         mode->hdisplay = hdisplay;
7374         mode->vdisplay = vdisplay;
7375         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7376         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7377
7378         return mode;
7379
7380 }
7381
7382 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7383                                                  struct drm_connector *connector)
7384 {
7385         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7386         struct drm_display_mode *mode = NULL;
7387         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7388         struct amdgpu_dm_connector *amdgpu_dm_connector =
7389                                 to_amdgpu_dm_connector(connector);
7390         int i;
7391         int n;
7392         struct mode_size {
7393                 char name[DRM_DISPLAY_MODE_LEN];
7394                 int w;
7395                 int h;
7396         } common_modes[] = {
7397                 {  "640x480",  640,  480},
7398                 {  "800x600",  800,  600},
7399                 { "1024x768", 1024,  768},
7400                 { "1280x720", 1280,  720},
7401                 { "1280x800", 1280,  800},
7402                 {"1280x1024", 1280, 1024},
7403                 { "1440x900", 1440,  900},
7404                 {"1680x1050", 1680, 1050},
7405                 {"1600x1200", 1600, 1200},
7406                 {"1920x1080", 1920, 1080},
7407                 {"1920x1200", 1920, 1200}
7408         };
7409
7410         n = ARRAY_SIZE(common_modes);
7411
7412         for (i = 0; i < n; i++) {
7413                 struct drm_display_mode *curmode = NULL;
7414                 bool mode_existed = false;
7415
7416                 if (common_modes[i].w > native_mode->hdisplay ||
7417                     common_modes[i].h > native_mode->vdisplay ||
7418                    (common_modes[i].w == native_mode->hdisplay &&
7419                     common_modes[i].h == native_mode->vdisplay))
7420                         continue;
7421
7422                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7423                         if (common_modes[i].w == curmode->hdisplay &&
7424                             common_modes[i].h == curmode->vdisplay) {
7425                                 mode_existed = true;
7426                                 break;
7427                         }
7428                 }
7429
7430                 if (mode_existed)
7431                         continue;
7432
7433                 mode = amdgpu_dm_create_common_mode(encoder,
7434                                 common_modes[i].name, common_modes[i].w,
7435                                 common_modes[i].h);
                if (!mode)
                        continue;

7436                 drm_mode_probed_add(connector, mode);
7437                 amdgpu_dm_connector->num_modes++;
7438         }
7439 }
7440
7441 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7442                                               struct edid *edid)
7443 {
7444         struct amdgpu_dm_connector *amdgpu_dm_connector =
7445                         to_amdgpu_dm_connector(connector);
7446
7447         if (edid) {
7448                 /* empty probed_modes */
7449                 INIT_LIST_HEAD(&connector->probed_modes);
7450                 amdgpu_dm_connector->num_modes =
7451                                 drm_add_edid_modes(connector, edid);
7452
7453                 /* Sort the probed modes before calling
7454                  * amdgpu_dm_get_native_mode(), since an EDID can have
7455                  * more than one preferred mode, and modes later in the
7456                  * probed list may have a higher preferred resolution.
7457                  * For example, a 3840x2160 preferred timing in the base
7458                  * EDID and a 4096x2160 preferred resolution in a later
7459                  * DID extension block.
7460                  */
7461                 drm_mode_sort(&connector->probed_modes);
7462                 amdgpu_dm_get_native_mode(connector);
7463
7464                 /* Freesync capabilities are reset by calling
7465                  * drm_add_edid_modes() and need to be
7466                  * restored here.
7467                  */
7468                 amdgpu_dm_update_freesync_caps(connector, edid);
7469         } else {
7470                 amdgpu_dm_connector->num_modes = 0;
7471         }
7472 }
7473
7474 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7475                               struct drm_display_mode *mode)
7476 {
7477         struct drm_display_mode *m;
7478
7479         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7480                 if (drm_mode_equal(m, mode))
7481                         return true;
7482         }
7483
7484         return false;
7485 }
7486
7487 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7488 {
7489         const struct drm_display_mode *m;
7490         struct drm_display_mode *new_mode;
7491         uint i;
7492         uint32_t new_modes_count = 0;
7493
7494         /* Standard FPS values
7495          *
7496          * 23.976   - TV/NTSC
7497          * 24       - Cinema
7498          * 25       - TV/PAL
7499          * 29.97    - TV/NTSC
7500          * 30       - TV/NTSC
7501          * 48       - Cinema HFR
7502          * 50       - TV/PAL
7503          * 60       - Commonly used
7504          * 48,72,96 - Multiples of 24
7505          */
7506         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7507                                          48000, 50000, 60000, 72000, 96000 };
7508
7509         /*
7510          * Find the mode with the highest refresh rate at the same
7511          * resolution as the preferred mode; some monitors report a
7512          * preferred mode whose refresh rate is not the highest supported.
7513          */
7514
7515         m = get_highest_refresh_rate_mode(aconnector, true);
7516         if (!m)
7517                 return 0;
7518
7519         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7520                 uint64_t target_vtotal, target_vtotal_diff;
7521                 uint64_t num, den;
7522
7523                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7524                         continue;
7525
7526                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7527                     common_rates[i] > aconnector->max_vfreq * 1000)
7528                         continue;
7529
7530                 num = (unsigned long long)m->clock * 1000 * 1000;
7531                 den = common_rates[i] * (unsigned long long)m->htotal;
7532                 target_vtotal = div_u64(num, den);
7533                 target_vtotal_diff = target_vtotal - m->vtotal;
7534
7535                 /* Check for illegal modes */
7536                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7537                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7538                     m->vtotal + target_vtotal_diff < m->vsync_end)
7539                         continue;
7540
7541                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7542                 if (!new_mode)
7543                         goto out;
7544
7545                 new_mode->vtotal += (u16)target_vtotal_diff;
7546                 new_mode->vsync_start += (u16)target_vtotal_diff;
7547                 new_mode->vsync_end += (u16)target_vtotal_diff;
7548                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7549                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7550
7551                 if (!is_duplicate_mode(aconnector, new_mode)) {
7552                         drm_mode_probed_add(&aconnector->base, new_mode);
7553                         new_modes_count += 1;
7554                 } else {
7555                         drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7556         }
7557  out:
7558         return new_modes_count;
7559 }
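
/*
 * Worked example for the retargeting math above (illustrative numbers):
 * take a 1080p60 base mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125.  For the 30000 mHz (30 Hz) entry:
 *
 *   target_vtotal      = 148500 * 1000 * 1000 / (30000 * 2200) = 2250
 *   target_vtotal_diff = 2250 - 1125 = 1125
 *
 * The duplicated mode keeps the pixel clock and stretches the vertical
 * timing, halving the refresh rate while staying inside the VRR window.
 */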
7560
7561 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7562                                                    struct edid *edid)
7563 {
7564         struct amdgpu_dm_connector *amdgpu_dm_connector =
7565                 to_amdgpu_dm_connector(connector);
7566
7567         if (!(amdgpu_freesync_vid_mode && edid))
7568                 return;
7569
7570         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7571                 amdgpu_dm_connector->num_modes +=
7572                         add_fs_modes(amdgpu_dm_connector);
7573 }
7574
7575 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7576 {
7577         struct amdgpu_dm_connector *amdgpu_dm_connector =
7578                         to_amdgpu_dm_connector(connector);
7579         struct drm_encoder *encoder;
7580         struct edid *edid = amdgpu_dm_connector->edid;
7581
7582         encoder = amdgpu_dm_connector_to_encoder(connector);
7583
7584         if (!drm_edid_is_valid(edid)) {
7585                 amdgpu_dm_connector->num_modes =
7586                                 drm_add_modes_noedid(connector, 640, 480);
7587         } else {
7588                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7589                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7590                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7591         }
7592         amdgpu_dm_fbc_init(connector);
7593
7594         return amdgpu_dm_connector->num_modes;
7595 }
7596
7597 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7598                                      struct amdgpu_dm_connector *aconnector,
7599                                      int connector_type,
7600                                      struct dc_link *link,
7601                                      int link_index)
7602 {
7603         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7604
7605         /*
7606          * Some of the properties below require access to state, like bpc.
7607          * Allocate some default initial connector state with our reset helper.
7608          */
7609         if (aconnector->base.funcs->reset)
7610                 aconnector->base.funcs->reset(&aconnector->base);
7611
7612         aconnector->connector_id = link_index;
7613         aconnector->dc_link = link;
7614         aconnector->base.interlace_allowed = false;
7615         aconnector->base.doublescan_allowed = false;
7616         aconnector->base.stereo_allowed = false;
7617         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7618         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7619         aconnector->audio_inst = -1;
7620         mutex_init(&aconnector->hpd_lock);
7621
7622         /*
7623          * Configure HPD hot-plug support. connector->polled defaults to 0
7624          * (DRM_CONNECTOR_POLL_NONE), which means HPD hot plug is not supported.
7625          */
7626         switch (connector_type) {
7627         case DRM_MODE_CONNECTOR_HDMIA:
7628                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7629                 aconnector->base.ycbcr_420_allowed =
7630                         link->link_enc->features.hdmi_ycbcr420_supported;
7631                 break;
7632         case DRM_MODE_CONNECTOR_DisplayPort:
7633                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7634                 aconnector->base.ycbcr_420_allowed =
7635                         link->link_enc->features.dp_ycbcr420_supported;
7636                 break;
7637         case DRM_MODE_CONNECTOR_DVID:
7638                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7639                 break;
7640         default:
7641                 break;
7642         }
7643
7644         drm_object_attach_property(&aconnector->base.base,
7645                                 dm->ddev->mode_config.scaling_mode_property,
7646                                 DRM_MODE_SCALE_NONE);
7647
7648         drm_object_attach_property(&aconnector->base.base,
7649                                 adev->mode_info.underscan_property,
7650                                 UNDERSCAN_OFF);
7651         drm_object_attach_property(&aconnector->base.base,
7652                                 adev->mode_info.underscan_hborder_property,
7653                                 0);
7654         drm_object_attach_property(&aconnector->base.base,
7655                                 adev->mode_info.underscan_vborder_property,
7656                                 0);
7657
7658         if (!aconnector->mst_port)
7659                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7660
7661         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7662         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7663         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7664
7665         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7666             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7667                 drm_object_attach_property(&aconnector->base.base,
7668                                 adev->mode_info.abm_level_property, 0);
7669         }
7670
7671         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7672             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7673             connector_type == DRM_MODE_CONNECTOR_eDP) {
7674                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7675
7676                 if (!aconnector->mst_port)
7677                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7678
7679 #ifdef CONFIG_DRM_AMD_DC_HDCP
7680                 if (adev->dm.hdcp_workqueue)
7681                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7682 #endif
7683         }
7684 }
7685
7686 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7687                               struct i2c_msg *msgs, int num)
7688 {
7689         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7690         struct ddc_service *ddc_service = i2c->ddc_service;
7691         struct i2c_command cmd;
7692         int i;
7693         int result = -EIO;
7694
7695         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7696
7697         if (!cmd.payloads)
7698                 return result;
7699
7700         cmd.number_of_payloads = num;
7701         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7702         cmd.speed = 100;
7703
7704         for (i = 0; i < num; i++) {
7705                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7706                 cmd.payloads[i].address = msgs[i].addr;
7707                 cmd.payloads[i].length = msgs[i].len;
7708                 cmd.payloads[i].data = msgs[i].buf;
7709         }
7710
7711         if (dc_submit_i2c(
7712                         ddc_service->ctx->dc,
7713                         ddc_service->ddc_pin->hw_info.ddc_channel,
7714                         &cmd))
7715                 result = num;
7716
7717         kfree(cmd.payloads);
7718         return result;
7719 }
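
/*
 * Illustrative sketch, not part of the driver: what a typical EDID block
 * fetch looks like by the time it reaches amdgpu_dm_i2c_xfer().  DDC_ADDR
 * and EDID_LENGTH come from <drm/drm_edid.h>; the helper name is made up
 * for the example.
 */
static int __maybe_unused example_ddc_read_edid_block(struct i2c_adapter *adap,
                                                      u8 *block)
{
        u8 offset = 0;
        struct i2c_msg msgs[] = {
                { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset },
                { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH,
                  .buf = block },
        };

        /* Each message becomes one i2c_payload in amdgpu_dm_i2c_xfer(). */
        return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
}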
7720
7721 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7722 {
7723         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7724 }
7725
7726 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7727         .master_xfer = amdgpu_dm_i2c_xfer,
7728         .functionality = amdgpu_dm_i2c_func,
7729 };
7730
7731 static struct amdgpu_i2c_adapter *
7732 create_i2c(struct ddc_service *ddc_service,
7733            int link_index,
7734            int *res)
7735 {
7736         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7737         struct amdgpu_i2c_adapter *i2c;
7738
7739         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7740         if (!i2c)
7741                 return NULL;
7742         i2c->base.owner = THIS_MODULE;
7743         i2c->base.class = I2C_CLASS_DDC;
7744         i2c->base.dev.parent = &adev->pdev->dev;
7745         i2c->base.algo = &amdgpu_dm_i2c_algo;
7746         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7747         i2c_set_adapdata(&i2c->base, i2c);
7748         i2c->ddc_service = ddc_service;
7749         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7750
7751         return i2c;
7752 }
7753
7754
7755 /*
7756  * Note: this function assumes that dc_link_detect() was called for the
7757  * dc_link which will be represented by this aconnector.
7758  */
7759 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7760                                     struct amdgpu_dm_connector *aconnector,
7761                                     uint32_t link_index,
7762                                     struct amdgpu_encoder *aencoder)
7763 {
7764         int res = 0;
7765         int connector_type;
7766         struct dc *dc = dm->dc;
7767         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7768         struct amdgpu_i2c_adapter *i2c;
7769
7770         link->priv = aconnector;
7771
7772         DRM_DEBUG_DRIVER("%s()\n", __func__);
7773
7774         i2c = create_i2c(link->ddc, link->link_index, &res);
7775         if (!i2c) {
7776                 DRM_ERROR("Failed to create i2c adapter data\n");
7777                 return -ENOMEM;
7778         }
7779
7780         aconnector->i2c = i2c;
7781         res = i2c_add_adapter(&i2c->base);
7782
7783         if (res) {
7784                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7785                 goto out_free;
7786         }
7787
7788         connector_type = to_drm_connector_type(link->connector_signal);
7789
7790         res = drm_connector_init_with_ddc(
7791                         dm->ddev,
7792                         &aconnector->base,
7793                         &amdgpu_dm_connector_funcs,
7794                         connector_type,
7795                         &i2c->base);
7796
7797         if (res) {
7798                 DRM_ERROR("connector_init failed\n");
7799                 aconnector->connector_id = -1;
7800                 goto out_free;
7801         }
7802
7803         drm_connector_helper_add(
7804                         &aconnector->base,
7805                         &amdgpu_dm_connector_helper_funcs);
7806
7807         amdgpu_dm_connector_init_helper(
7808                 dm,
7809                 aconnector,
7810                 connector_type,
7811                 link,
7812                 link_index);
7813
7814         drm_connector_attach_encoder(
7815                 &aconnector->base, &aencoder->base);
7816
7817         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7818                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7819                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7820
7821 out_free:
7822         if (res) {
7823                 kfree(i2c);
7824                 aconnector->i2c = NULL;
7825         }
7826         return res;
7827 }
7828
7829 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7830 {
7831         switch (adev->mode_info.num_crtc) {
7832         case 1:
7833                 return 0x1;
7834         case 2:
7835                 return 0x3;
7836         case 3:
7837                 return 0x7;
7838         case 4:
7839                 return 0xf;
7840         case 5:
7841                 return 0x1f;
7842         case 6:
7843         default:
7844                 return 0x3f;
7845         }
7846 }
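
/*
 * Equivalent closed form of the switch above (a sketch, assuming
 * num_crtc >= 1): one bit per CRTC, clamped to the 6-CRTC limit.
 *
 *   return (int)GENMASK(min(adev->mode_info.num_crtc, 6) - 1, 0);
 */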
7847
7848 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7849                                   struct amdgpu_encoder *aencoder,
7850                                   uint32_t link_index)
7851 {
7852         struct amdgpu_device *adev = drm_to_adev(dev);
7853
7854         int res = drm_encoder_init(dev,
7855                                    &aencoder->base,
7856                                    &amdgpu_dm_encoder_funcs,
7857                                    DRM_MODE_ENCODER_TMDS,
7858                                    NULL);
7859
7860         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7861
7862         if (!res)
7863                 aencoder->encoder_id = link_index;
7864         else
7865                 aencoder->encoder_id = -1;
7866
7867         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7868
7869         return res;
7870 }
7871
7872 static void manage_dm_interrupts(struct amdgpu_device *adev,
7873                                  struct amdgpu_crtc *acrtc,
7874                                  bool enable)
7875 {
7876         /*
7877          * We have no guarantee that the frontend index maps to the same
7878          * backend index - some even map to more than one.
7879          *
7880          * TODO: Use a different interrupt or check DC itself for the mapping.
7881          */
7882         int irq_type =
7883                 amdgpu_display_crtc_idx_to_irq_type(
7884                         adev,
7885                         acrtc->crtc_id);
7886
7887         if (enable) {
7888                 drm_crtc_vblank_on(&acrtc->base);
7889                 amdgpu_irq_get(
7890                         adev,
7891                         &adev->pageflip_irq,
7892                         irq_type);
7893 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7894                 amdgpu_irq_get(
7895                         adev,
7896                         &adev->vline0_irq,
7897                         irq_type);
7898 #endif
7899         } else {
7900 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7901                 amdgpu_irq_put(
7902                         adev,
7903                         &adev->vline0_irq,
7904                         irq_type);
7905 #endif
7906                 amdgpu_irq_put(
7907                         adev,
7908                         &adev->pageflip_irq,
7909                         irq_type);
7910                 drm_crtc_vblank_off(&acrtc->base);
7911         }
7912 }
7913
7914 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7915                                       struct amdgpu_crtc *acrtc)
7916 {
7917         int irq_type =
7918                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7919
7920         /*
7921          * This reads the current IRQ state and forcibly reapplies the
7922          * setting to the hardware.
7923          */
7924         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7925 }
7926
7927 static bool
7928 is_scaling_state_different(const struct dm_connector_state *dm_state,
7929                            const struct dm_connector_state *old_dm_state)
7930 {
7931         if (dm_state->scaling != old_dm_state->scaling)
7932                 return true;
7933         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7934                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7935                         return true;
7936         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7937                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7938                         return true;
7939         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7940                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7941                 return true;
7942         return false;
7943 }
7944
7945 #ifdef CONFIG_DRM_AMD_DC_HDCP
7946 static bool is_content_protection_different(struct drm_connector_state *state,
7947                                             const struct drm_connector_state *old_state,
7948                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7949 {
7950         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7951         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7952
7953         /* Handle: Type0/1 change */
7954         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7955             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7956                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7957                 return true;
7958         }
7959
7960         /* CP is being re-enabled; ignore this.
7961          *
7962          * Handles:     ENABLED -> DESIRED
7963          */
7964         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7965             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7966                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7967                 return false;
7968         }
7969
7970         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7971          *
7972          * Handles:     UNDESIRED -> ENABLED
7973          */
7974         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7975             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7976                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7977
7978         /* Check that something is connected/enabled; otherwise we would start
7979          * HDCP with nothing to protect (hot-plug, headless S3, DPMS).
7980          *
7981          * Handles:     DESIRED -> DESIRED (Special case)
7982          */
7983         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7984             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7985                 dm_con_state->update_hdcp = false;
7986                 return true;
7987         }
7988
7989         /*
7990          * Handles:     UNDESIRED -> UNDESIRED
7991          *              DESIRED -> DESIRED
7992          *              ENABLED -> ENABLED
7993          */
7994         if (old_state->content_protection == state->content_protection)
7995                 return false;
7996
7997         /*
7998          * Handles:     UNDESIRED -> DESIRED
7999          *              DESIRED -> UNDESIRED
8000          *              ENABLED -> UNDESIRED
8001          */
8002         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8003                 return true;
8004
8005         /*
8006          * Handles:     DESIRED -> ENABLED
8007          */
8008         return false;
8009 }
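
/*
 * Summary of the transitions handled above (old -> new content protection
 * state, and whether HDCP needs to be (re)enabled):
 *
 *   Type0/1 change while not UNDESIRED -> true (restarted as DESIRED)
 *   ENABLED   -> DESIRED               -> false (kept as ENABLED)
 *   UNDESIRED -> ENABLED (S3 resume)   -> downgraded to DESIRED, falls through
 *   DESIRED   -> DESIRED + update_hdcp -> true (hot-plug/dpms special case)
 *   unchanged state                    -> false
 *   any       -> not ENABLED           -> true
 *   DESIRED   -> ENABLED               -> false (hardware performs the enable)
 */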
8010
8011 #endif
8012 static void remove_stream(struct amdgpu_device *adev,
8013                           struct amdgpu_crtc *acrtc,
8014                           struct dc_stream_state *stream)
8015 {
8016         /* Update-mode case: mark the CRTC disabled and detached from its OTG. */
8017
8018         acrtc->otg_inst = -1;
8019         acrtc->enabled = false;
8020 }
8021
8022 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8023                                struct dc_cursor_position *position)
8024 {
8025         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8026         int x, y;
8027         int xorigin = 0, yorigin = 0;
8028
8029         if (!crtc || !plane->state->fb)
8030                 return 0;
8031
8032         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8033             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8034                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8035                           __func__,
8036                           plane->state->crtc_w,
8037                           plane->state->crtc_h);
8038                 return -EINVAL;
8039         }
8040
8041         x = plane->state->crtc_x;
8042         y = plane->state->crtc_y;
8043
8044         if (x <= -amdgpu_crtc->max_cursor_width ||
8045             y <= -amdgpu_crtc->max_cursor_height)
8046                 return 0;
8047
8048         if (x < 0) {
8049                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8050                 x = 0;
8051         }
8052         if (y < 0) {
8053                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8054                 y = 0;
8055         }
8056         position->enable = true;
8057         position->translate_by_source = true;
8058         position->x = x;
8059         position->y = y;
8060         position->x_hotspot = xorigin;
8061         position->y_hotspot = yorigin;
8062
8063         return 0;
8064 }
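
/*
 * Worked example for the clamping above (illustrative): a 64x64 cursor at
 * crtc_x = -10, crtc_y = 5 yields x = 0, x_hotspot = 10, y = 5,
 * y_hotspot = 0; the hotspot shift keeps the cursor image appearing to
 * extend 10 pixels past the left edge of the screen.
 */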
8065
8066 static void handle_cursor_update(struct drm_plane *plane,
8067                                  struct drm_plane_state *old_plane_state)
8068 {
8069         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8070         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8071         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8072         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8073         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8074         uint64_t address = afb ? afb->address : 0;
8075         struct dc_cursor_position position = {0};
8076         struct dc_cursor_attributes attributes;
8077         int ret;
8078
8079         if (!plane->state->fb && !old_plane_state->fb)
8080                 return;
8081
8082         DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
8083                       __func__,
8084                       amdgpu_crtc->crtc_id,
8085                       plane->state->crtc_w,
8086                       plane->state->crtc_h);
8087
8088         ret = get_cursor_position(plane, crtc, &position);
8089         if (ret)
8090                 return;
8091
8092         if (!position.enable) {
8093                 /* turn off cursor */
8094                 if (crtc_state && crtc_state->stream) {
8095                         mutex_lock(&adev->dm.dc_lock);
8096                         dc_stream_set_cursor_position(crtc_state->stream,
8097                                                       &position);
8098                         mutex_unlock(&adev->dm.dc_lock);
8099                 }
8100                 return;
8101         }
8102
8103         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8104         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8105
8106         memset(&attributes, 0, sizeof(attributes));
8107         attributes.address.high_part = upper_32_bits(address);
8108         attributes.address.low_part  = lower_32_bits(address);
8109         attributes.width             = plane->state->crtc_w;
8110         attributes.height            = plane->state->crtc_h;
8111         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8112         attributes.rotation_angle    = 0;
8113         attributes.attribute_flags.value = 0;
8114
8115         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8116
8117         if (crtc_state->stream) {
8118                 mutex_lock(&adev->dm.dc_lock);
8119                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8120                                                          &attributes))
8121                         DRM_ERROR("DC failed to set cursor attributes\n");
8122
8123                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8124                                                    &position))
8125                         DRM_ERROR("DC failed to set cursor position\n");
8126                 mutex_unlock(&adev->dm.dc_lock);
8127         }
8128 }
8129
8130 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8131 {
8132
8133         assert_spin_locked(&acrtc->base.dev->event_lock);
8134         WARN_ON(acrtc->event);
8135
8136         acrtc->event = acrtc->base.state->event;
8137
8138         /* Set the flip status */
8139         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8140
8141         /* Mark this event as consumed */
8142         acrtc->base.state->event = NULL;
8143
8144         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8145                      acrtc->crtc_id);
8146 }
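/*
 * Editor's note -- a sketch of the consumer side of this handoff,
 * assuming the usual amdgpu_dm pageflip IRQ flow (not a verbatim quote
 * of the handler): the event parked in acrtc->event above is completed
 * from the pageflip interrupt, roughly:
 *
 *      spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 *      if (acrtc->event) {
 *              drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
 *              acrtc->event = NULL;
 *      }
 *      acrtc->pflip_status = AMDGPU_FLIP_NONE;
 *      spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 *
 * Both sides run under event_lock, which is why this helper asserts the
 * lock is already held.
 */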
8147
8148 static void update_freesync_state_on_stream(
8149         struct amdgpu_display_manager *dm,
8150         struct dm_crtc_state *new_crtc_state,
8151         struct dc_stream_state *new_stream,
8152         struct dc_plane_state *surface,
8153         u32 flip_timestamp_in_us)
8154 {
8155         struct mod_vrr_params vrr_params;
8156         struct dc_info_packet vrr_infopacket = {0};
8157         struct amdgpu_device *adev = dm->adev;
8158         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8159         unsigned long flags;
8160         bool pack_sdp_v1_3 = false;
8161
8162         if (!new_stream)
8163                 return;
8164
8165         /*
8166          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8167          * For now it's sufficient to just guard against these conditions.
8168          */
8169
8170         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8171                 return;
8172
8173         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8174         vrr_params = acrtc->dm_irq_params.vrr_params;
8175
8176         if (surface) {
8177                 mod_freesync_handle_preflip(
8178                         dm->freesync_module,
8179                         surface,
8180                         new_stream,
8181                         flip_timestamp_in_us,
8182                         &vrr_params);
8183
8184                 if (adev->family < AMDGPU_FAMILY_AI &&
8185                     amdgpu_dm_vrr_active(new_crtc_state)) {
8186                         mod_freesync_handle_v_update(dm->freesync_module,
8187                                                      new_stream, &vrr_params);
8188
8189                         /* Need to call this before the frame ends. */
8190                         dc_stream_adjust_vmin_vmax(dm->dc,
8191                                                    new_crtc_state->stream,
8192                                                    &vrr_params.adjust);
8193                 }
8194         }
8195
8196         mod_freesync_build_vrr_infopacket(
8197                 dm->freesync_module,
8198                 new_stream,
8199                 &vrr_params,
8200                 PACKET_TYPE_VRR,
8201                 TRANSFER_FUNC_UNKNOWN,
8202                 &vrr_infopacket,
8203                 pack_sdp_v1_3);
8204
8205         new_crtc_state->freesync_timing_changed |=
8206                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8207                         &vrr_params.adjust,
8208                         sizeof(vrr_params.adjust)) != 0);
8209
8210         new_crtc_state->freesync_vrr_info_changed |=
8211                 (memcmp(&new_crtc_state->vrr_infopacket,
8212                         &vrr_infopacket,
8213                         sizeof(vrr_infopacket)) != 0);
8214
8215         acrtc->dm_irq_params.vrr_params = vrr_params;
8216         new_crtc_state->vrr_infopacket = vrr_infopacket;
8217
8218         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8219         new_stream->vrr_infopacket = vrr_infopacket;
8220
8221         if (new_crtc_state->freesync_vrr_info_changed)
8222                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8223                               new_crtc_state->base.crtc->base.id,
8224                               (int)new_crtc_state->base.vrr_enabled,
8225                               (int)vrr_params.state);
8226
8227         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8228 }
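/*
 * Editor's note: change detection above is a plain byte-wise memcmp of
 * the old and new VRR adjust/infopacket state, and the |= accumulation
 * makes the "changed" flags sticky -- once set during a commit they stay
 * set even if a later call computes identical parameters.
 */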
8229
8230 static void update_stream_irq_parameters(
8231         struct amdgpu_display_manager *dm,
8232         struct dm_crtc_state *new_crtc_state)
8233 {
8234         struct dc_stream_state *new_stream = new_crtc_state->stream;
8235         struct mod_vrr_params vrr_params;
8236         struct mod_freesync_config config = new_crtc_state->freesync_config;
8237         struct amdgpu_device *adev = dm->adev;
8238         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8239         unsigned long flags;
8240
8241         if (!new_stream)
8242                 return;
8243
8244         /*
8245          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8246          * For now it's sufficient to just guard against these conditions.
8247          */
8248         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8249                 return;
8250
8251         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8252         vrr_params = acrtc->dm_irq_params.vrr_params;
8253
8254         if (new_crtc_state->vrr_supported &&
8255             config.min_refresh_in_uhz &&
8256             config.max_refresh_in_uhz) {
8257                 /*
8258                  * if freesync compatible mode was set, config.state will be set
8259                  * in atomic check
8260                  */
8261                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8262                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8263                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8264                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8265                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8266                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8267                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8268                 } else {
8269                         config.state = new_crtc_state->base.vrr_enabled ?
8270                                                      VRR_STATE_ACTIVE_VARIABLE :
8271                                                      VRR_STATE_INACTIVE;
8272                 }
8273         } else {
8274                 config.state = VRR_STATE_UNSUPPORTED;
8275         }
8276
8277         mod_freesync_build_vrr_params(dm->freesync_module,
8278                                       new_stream,
8279                                       &config, &vrr_params);
8280
8281         new_crtc_state->freesync_timing_changed |=
8282                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8283                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8284
8285         new_crtc_state->freesync_config = config;
8286         /* Copy state for access from DM IRQ handler */
8287         acrtc->dm_irq_params.freesync_config = config;
8288         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8289         acrtc->dm_irq_params.vrr_params = vrr_params;
8290         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8291 }
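/*
 * Editor's note -- the state selection above, summarized:
 *
 *   vrr_supported + min/max range   requested config           resulting state
 *   -----------------------------   ------------------------   -------------------------
 *   no                              any                        VRR_STATE_UNSUPPORTED
 *   yes                             ACTIVE_FIXED + fixed uhz   VRR_STATE_ACTIVE_FIXED
 *                                   (and no full modeset)
 *   yes                             vrr_enabled                VRR_STATE_ACTIVE_VARIABLE
 *   yes                             !vrr_enabled               VRR_STATE_INACTIVE
 */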
8292
8293 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8294                                             struct dm_crtc_state *new_state)
8295 {
8296         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8297         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8298
8299         if (!old_vrr_active && new_vrr_active) {
8300                 /* Transition VRR inactive -> active:
8301                  * While VRR is active, we must not disable vblank irq, as a
8302                  * reenable after disable would compute bogus vblank/pflip
8303                  * timestamps if the re-enable happened inside the front porch.
8304                  *
8305                  * We also need vupdate irq for the actual core vblank handling
8306                  * at end of vblank.
8307                  */
8308                 dm_set_vupdate_irq(new_state->base.crtc, true);
8309                 drm_crtc_vblank_get(new_state->base.crtc);
8310                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8311                                  __func__, new_state->base.crtc->base.id);
8312         } else if (old_vrr_active && !new_vrr_active) {
8313                 /* Transition VRR active -> inactive:
8314                  * Allow vblank irq disable again for fixed refresh rate.
8315                  */
8316                 dm_set_vupdate_irq(new_state->base.crtc, false);
8317                 drm_crtc_vblank_put(new_state->base.crtc);
8318                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8319                                  __func__, new_state->base.crtc->base.id);
8320         }
8321 }
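/*
 * Editor's note: the get/put pair above must stay balanced -- every
 * off->on transition takes exactly one vblank reference and enables the
 * vupdate irq, and the matching on->off transition releases both, so
 * repeated VRR toggles never accumulate references. The DRM core warns
 * on an unbalanced drm_crtc_vblank_put().
 */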
8322
8323 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8324 {
8325         struct drm_plane *plane;
8326         struct drm_plane_state *old_plane_state;
8327         int i;
8328
8329         /*
8330          * TODO: Make this per-stream so we don't issue redundant updates for
8331          * commits with multiple streams.
8332          */
8333         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8334                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8335                         handle_cursor_update(plane, old_plane_state);
8336 }
8337
8338 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8339                                     struct dc_state *dc_state,
8340                                     struct drm_device *dev,
8341                                     struct amdgpu_display_manager *dm,
8342                                     struct drm_crtc *pcrtc,
8343                                     bool wait_for_vblank)
8344 {
8345         uint32_t i;
8346         uint64_t timestamp_ns;
8347         struct drm_plane *plane;
8348         struct drm_plane_state *old_plane_state, *new_plane_state;
8349         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8350         struct drm_crtc_state *new_pcrtc_state =
8351                         drm_atomic_get_new_crtc_state(state, pcrtc);
8352         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8353         struct dm_crtc_state *dm_old_crtc_state =
8354                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8355         int planes_count = 0, vpos, hpos;
8356         long r;
8357         unsigned long flags;
8358         struct amdgpu_bo *abo;
8359         uint32_t target_vblank, last_flip_vblank;
8360         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8361         bool pflip_present = false;
8362         struct {
8363                 struct dc_surface_update surface_updates[MAX_SURFACES];
8364                 struct dc_plane_info plane_infos[MAX_SURFACES];
8365                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8366                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8367                 struct dc_stream_update stream_update;
8368         } *bundle;
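        /*
         * Editor's note: the bundle is heap-allocated because its four
         * MAX_SURFACES-sized arrays of DC structs add up to several
         * kilobytes -- far too large for the kernel stack.
         */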
8369
8370         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8371
8372         if (!bundle) {
8373                 dm_error("Failed to allocate update bundle\n");
8374                 goto cleanup;
8375         }
8376
8377         /*
8378          * Disable the cursor first if we're disabling all the planes.
8379          * It'll remain on the screen after the planes are re-enabled
8380          * if we don't.
8381          */
8382         if (acrtc_state->active_planes == 0)
8383                 amdgpu_dm_commit_cursors(state);
8384
8385         /* update planes when needed */
8386         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8387                 struct drm_crtc *crtc = new_plane_state->crtc;
8388                 struct drm_crtc_state *new_crtc_state;
8389                 struct drm_framebuffer *fb = new_plane_state->fb;
8390                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8391                 bool plane_needs_flip;
8392                 struct dc_plane_state *dc_plane;
8393                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8394
8395                 /* Cursor plane is handled after stream updates */
8396                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8397                         continue;
8398
8399                 if (!fb || !crtc || pcrtc != crtc)
8400                         continue;
8401
8402                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8403                 if (!new_crtc_state->active)
8404                         continue;
8405
8406                 dc_plane = dm_new_plane_state->dc_state;
8407
8408                 bundle->surface_updates[planes_count].surface = dc_plane;
8409                 if (new_pcrtc_state->color_mgmt_changed) {
8410                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8411                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8412                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8413                 }
8414
8415                 fill_dc_scaling_info(new_plane_state,
8416                                      &bundle->scaling_infos[planes_count]);
8417
8418                 bundle->surface_updates[planes_count].scaling_info =
8419                         &bundle->scaling_infos[planes_count];
8420
8421                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8422
8423                 pflip_present = pflip_present || plane_needs_flip;
8424
8425                 if (!plane_needs_flip) {
8426                         planes_count += 1;
8427                         continue;
8428                 }
8429
8430                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8431
8432                 /*
8433                  * Wait for all fences on this FB. Do limited wait to avoid
8434                  * deadlock during GPU reset when this fence will not signal
8435                  * but we hold reservation lock for the BO.
8436                  */
8437                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8438                                                         false,
8439                                                         msecs_to_jiffies(5000));
8440                 if (unlikely(r <= 0))
8441                         DRM_ERROR("Waiting for fences timed out!\n");
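                /*
                 * Editor's note (API behavior stated as an assumption for
                 * this kernel): dma_resv_wait_timeout_rcu() returns the
                 * remaining jiffies (> 0) on success, 0 on timeout and a
                 * negative errno on error, so "r <= 0" catches both the
                 * timeout and the error case.
                 */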
8442
8443                 fill_dc_plane_info_and_addr(
8444                         dm->adev, new_plane_state,
8445                         afb->tiling_flags,
8446                         &bundle->plane_infos[planes_count],
8447                         &bundle->flip_addrs[planes_count].address,
8448                         afb->tmz_surface, false);
8449
8450                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8451                                  new_plane_state->plane->index,
8452                                  bundle->plane_infos[planes_count].dcc.enable);
8453
8454                 bundle->surface_updates[planes_count].plane_info =
8455                         &bundle->plane_infos[planes_count];
8456
8457                 /*
8458                  * Only allow immediate flips for fast updates that don't
8459                  * change FB pitch, DCC state, rotation or mirroring.
8460                  */
8461                 bundle->flip_addrs[planes_count].flip_immediate =
8462                         crtc->state->async_flip &&
8463                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8464
8465                 timestamp_ns = ktime_get_ns();
8466                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8467                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8468                 bundle->surface_updates[planes_count].surface = dc_plane;
8469
8470                 if (!bundle->surface_updates[planes_count].surface) {
8471                         DRM_ERROR("No surface for CRTC: id=%d\n",
8472                                         acrtc_attach->crtc_id);
8473                         continue;
8474                 }
8475
8476                 if (plane == pcrtc->primary)
8477                         update_freesync_state_on_stream(
8478                                 dm,
8479                                 acrtc_state,
8480                                 acrtc_state->stream,
8481                                 dc_plane,
8482                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8483
8484                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8485                                  __func__,
8486                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8487                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8488
8489                 planes_count += 1;
8490
8491         }
8492
8493         if (pflip_present) {
8494                 if (!vrr_active) {
8495                         /* Use old throttling in non-vrr fixed refresh rate mode
8496                          * to keep flip scheduling based on target vblank counts
8497                          * working in a backwards compatible way, e.g., for
8498                          * clients using the GLX_OML_sync_control extension or
8499                          * DRI3/Present extension with defined target_msc.
8500                          */
8501                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8502                 }
8503                 else {
8504                         /* For variable refresh rate mode only:
8505                          * Get vblank of last completed flip to avoid > 1 vrr
8506                          * flips per video frame by use of throttling, but allow
8507                          * flip programming anywhere in the possibly large
8508                          * variable vrr vblank interval for fine-grained flip
8509                          * timing control and more opportunity to avoid stutter
8510                          * on late submission of flips.
8511                          */
8512                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8513                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8514                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8515                 }
8516
8517                 target_vblank = last_flip_vblank + wait_for_vblank;
8518
8519                 /*
8520                  * Wait until we're out of the vertical blank period before the one
8521                  * targeted by the flip
8522                  */
8523                 while ((acrtc_attach->enabled &&
8524                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8525                                                             0, &vpos, &hpos, NULL,
8526                                                             NULL, &pcrtc->hwmode)
8527                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8528                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8529                         (int)(target_vblank -
8530                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8531                         usleep_range(1000, 1100);
8532                 }
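                /*
                 * Editor's note: the (int)(target_vblank - current) > 0 test
                 * above is the standard idiom for comparing wrapping 32-bit
                 * counters: unsigned subtraction is well defined across a
                 * wrap, and the signed cast turns "target still in the
                 * future" into a sign check, e.g.
                 *
                 *   target = 0x00000002, current = 0xfffffffe
                 *   (int)(0x00000002 - 0xfffffffe) = (int)0x00000004 = 4 > 0
                 *
                 * so a flip armed just before the counter wraps still waits
                 * the right number of vblanks.
                 */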
8533
8534                 /*
8535                  * Prepare the flip event for the pageflip interrupt to handle.
8536                  *
8537                  * This only works in the case where we've already turned on the
8538                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
8539                  * from 0 -> n planes we have to skip a hardware generated event
8540                  * and rely on sending it from software.
8541                  */
8542                 if (acrtc_attach->base.state->event &&
8543                     acrtc_state->active_planes > 0) {
8544                         drm_crtc_vblank_get(pcrtc);
8545
8546                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8547
8548                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8549                         prepare_flip_isr(acrtc_attach);
8550
8551                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8552                 }
8553
8554                 if (acrtc_state->stream) {
8555                         if (acrtc_state->freesync_vrr_info_changed)
8556                                 bundle->stream_update.vrr_infopacket =
8557                                         &acrtc_state->stream->vrr_infopacket;
8558                 }
8559         }
8560
8561         /* Update the planes if changed or disable if we don't have any. */
8562         if ((planes_count || acrtc_state->active_planes == 0) &&
8563                 acrtc_state->stream) {
8564                 bundle->stream_update.stream = acrtc_state->stream;
8565                 if (new_pcrtc_state->mode_changed) {
8566                         bundle->stream_update.src = acrtc_state->stream->src;
8567                         bundle->stream_update.dst = acrtc_state->stream->dst;
8568                 }
8569
8570                 if (new_pcrtc_state->color_mgmt_changed) {
8571                         /*
8572                          * TODO: This isn't fully correct since we've actually
8573                          * already modified the stream in place.
8574                          */
8575                         bundle->stream_update.gamut_remap =
8576                                 &acrtc_state->stream->gamut_remap_matrix;
8577                         bundle->stream_update.output_csc_transform =
8578                                 &acrtc_state->stream->csc_color_matrix;
8579                         bundle->stream_update.out_transfer_func =
8580                                 acrtc_state->stream->out_transfer_func;
8581                 }
8582
8583                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8584                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8585                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8586
8587                 /*
8588                  * If FreeSync state on the stream has changed then we need to
8589                  * re-adjust the min/max bounds now that DC doesn't handle this
8590                  * as part of commit.
8591                  */
8592                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8593                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8594                         dc_stream_adjust_vmin_vmax(
8595                                 dm->dc, acrtc_state->stream,
8596                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8597                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8598                 }
8599                 mutex_lock(&dm->dc_lock);
8600                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8601                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8602                         amdgpu_dm_psr_disable(acrtc_state->stream);
8603
8604                 dc_commit_updates_for_stream(dm->dc,
8605                                                      bundle->surface_updates,
8606                                                      planes_count,
8607                                                      acrtc_state->stream,
8608                                                      &bundle->stream_update,
8609                                                      dc_state);
8610
8611                 /*
8612                  * Enable or disable the interrupts on the backend.
8613                  *
8614                  * Most pipes are put into power gating when unused.
8615                  *
8616                  * When power gating is enabled on a pipe we lose the
8617                  * interrupt enablement state when power gating is disabled.
8618                  *
8619                  * So we need to update the IRQ control state in hardware
8620                  * whenever the pipe turns on (since it could be previously
8621                  * power gated) or off (since some pipes can't be power gated
8622                  * on some ASICs).
8623                  */
8624                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8625                         dm_update_pflip_irq_state(drm_to_adev(dev),
8626                                                   acrtc_attach);
8627
8628                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8629                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8630                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8631                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8632                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8633                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8634                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8635                         amdgpu_dm_psr_enable(acrtc_state->stream);
8636                 }
8637
8638                 mutex_unlock(&dm->dc_lock);
8639         }
8640
8641         /*
8642          * Update cursor state *after* programming all the planes.
8643          * This avoids redundant programming when all planes are being
8644          * disabled - in that case the cursor was already turned off above.
8645          */
8646         if (acrtc_state->active_planes)
8647                 amdgpu_dm_commit_cursors(state);
8648
8649 cleanup:
8650         kfree(bundle);
8651 }
8652
8653 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8654                                    struct drm_atomic_state *state)
8655 {
8656         struct amdgpu_device *adev = drm_to_adev(dev);
8657         struct amdgpu_dm_connector *aconnector;
8658         struct drm_connector *connector;
8659         struct drm_connector_state *old_con_state, *new_con_state;
8660         struct drm_crtc_state *new_crtc_state;
8661         struct dm_crtc_state *new_dm_crtc_state;
8662         const struct dc_stream_status *status;
8663         int i, inst;
8664
8665         /* Notify device removals. */
8666         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8667                 if (old_con_state->crtc != new_con_state->crtc) {
8668                         /* CRTC changes require notification. */
8669                         goto notify;
8670                 }
8671
8672                 if (!new_con_state->crtc)
8673                         continue;
8674
8675                 new_crtc_state = drm_atomic_get_new_crtc_state(
8676                         state, new_con_state->crtc);
8677
8678                 if (!new_crtc_state)
8679                         continue;
8680
8681                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8682                         continue;
8683
8684         notify:
8685                 aconnector = to_amdgpu_dm_connector(connector);
8686
8687                 mutex_lock(&adev->dm.audio_lock);
8688                 inst = aconnector->audio_inst;
8689                 aconnector->audio_inst = -1;
8690                 mutex_unlock(&adev->dm.audio_lock);
8691
8692                 amdgpu_dm_audio_eld_notify(adev, inst);
8693         }
8694
8695         /* Notify audio device additions. */
8696         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8697                 if (!new_con_state->crtc)
8698                         continue;
8699
8700                 new_crtc_state = drm_atomic_get_new_crtc_state(
8701                         state, new_con_state->crtc);
8702
8703                 if (!new_crtc_state)
8704                         continue;
8705
8706                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8707                         continue;
8708
8709                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8710                 if (!new_dm_crtc_state->stream)
8711                         continue;
8712
8713                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8714                 if (!status)
8715                         continue;
8716
8717                 aconnector = to_amdgpu_dm_connector(connector);
8718
8719                 mutex_lock(&adev->dm.audio_lock);
8720                 inst = status->audio_inst;
8721                 aconnector->audio_inst = inst;
8722                 mutex_unlock(&adev->dm.audio_lock);
8723
8724                 amdgpu_dm_audio_eld_notify(adev, inst);
8725         }
8726 }
8727
8728 /**
8729  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8730  * @crtc_state: the DRM CRTC state
8731  * @stream_state: the DC stream state.
8732  *
8733  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8734  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8735  */
8736 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8737                                                 struct dc_stream_state *stream_state)
8738 {
8739         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8740 }
8741
8742 /**
8743  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8744  * @state: The atomic state to commit
8745  *
8746  * This will tell DC to commit the constructed DC state from atomic_check,
8747  * programming the hardware. Any failure here implies a hardware failure, since
8748  * atomic check should have filtered anything non-kosher.
8749  */
8750 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8751 {
8752         struct drm_device *dev = state->dev;
8753         struct amdgpu_device *adev = drm_to_adev(dev);
8754         struct amdgpu_display_manager *dm = &adev->dm;
8755         struct dm_atomic_state *dm_state;
8756         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8757         uint32_t i, j;
8758         struct drm_crtc *crtc;
8759         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8760         unsigned long flags;
8761         bool wait_for_vblank = true;
8762         struct drm_connector *connector;
8763         struct drm_connector_state *old_con_state, *new_con_state;
8764         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8765         int crtc_disable_count = 0;
8766         bool mode_set_reset_required = false;
8767
8768         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8769
8770         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8771
8772         dm_state = dm_atomic_get_new_state(state);
8773         if (dm_state && dm_state->context) {
8774                 dc_state = dm_state->context;
8775         } else {
8776                 /* No state changes, retain current state. */
8777                 dc_state_temp = dc_create_state(dm->dc);
8778                 ASSERT(dc_state_temp);
8779                 dc_state = dc_state_temp;
8780                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8781         }
8782
8783         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8784                                        new_crtc_state, i) {
8785                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8786
8787                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8788
8789                 if (old_crtc_state->active &&
8790                     (!new_crtc_state->active ||
8791                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8792                         manage_dm_interrupts(adev, acrtc, false);
8793                         dc_stream_release(dm_old_crtc_state->stream);
8794                 }
8795         }
8796
8797         drm_atomic_helper_calc_timestamping_constants(state);
8798
8799         /* update changed items */
8800         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8801                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8802
8803                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8804                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8805
8806                 DRM_DEBUG_ATOMIC(
8807                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8808                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8809                         "connectors_changed:%d\n",
8810                         acrtc->crtc_id,
8811                         new_crtc_state->enable,
8812                         new_crtc_state->active,
8813                         new_crtc_state->planes_changed,
8814                         new_crtc_state->mode_changed,
8815                         new_crtc_state->active_changed,
8816                         new_crtc_state->connectors_changed);
8817
8818                 /* Disable cursor if disabling crtc */
8819                 if (old_crtc_state->active && !new_crtc_state->active) {
8820                         struct dc_cursor_position position;
8821
8822                         memset(&position, 0, sizeof(position));
8823                         mutex_lock(&dm->dc_lock);
8824                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8825                         mutex_unlock(&dm->dc_lock);
8826                 }
8827
8828                 /* Copy all transient state flags into dc state */
8829                 if (dm_new_crtc_state->stream) {
8830                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8831                                                             dm_new_crtc_state->stream);
8832                 }
8833
8834                 /* handles headless hotplug case, updating new_state and
8835                  * aconnector as needed
8836                  */
8837
8838                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8839
8840                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8841
8842                         if (!dm_new_crtc_state->stream) {
8843                                 /*
8844                                  * This could happen because of issues with the
8845                                  * delivery of userspace notifications.
8846                                  * In this case userspace tries to set a mode on
8847                                  * a display which is in fact disconnected;
8848                                  * dc_sink is NULL on the aconnector in this case.
8849                                  * We expect a mode reset to come soon.
8850                                  *
8851                                  * This can also happen when an unplug occurs
8852                                  * while the resume sequence is still completing.
8853                                  *
8854                                  * In either case, we want to pretend we still
8855                                  * have a sink to keep the pipe running so that
8856                                  * the hw state stays consistent with the sw state.
8857                                  */
8858                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8859                                                 __func__, acrtc->base.base.id);
8860                                 continue;
8861                         }
8862
8863                         if (dm_old_crtc_state->stream)
8864                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8865
8866                         pm_runtime_get_noresume(dev->dev);
8867
8868                         acrtc->enabled = true;
8869                         acrtc->hw_mode = new_crtc_state->mode;
8870                         crtc->hwmode = new_crtc_state->mode;
8871                         mode_set_reset_required = true;
8872                 } else if (modereset_required(new_crtc_state)) {
8873                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8874                         /* i.e. reset mode */
8875                         if (dm_old_crtc_state->stream)
8876                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8877
8878                         mode_set_reset_required = true;
8879                 }
8880         } /* for_each_crtc_in_state() */
8881
8882         if (dc_state) {
8883                 /* If there was a mode set or reset, disable eDP PSR. */
8884                 if (mode_set_reset_required)
8885                         amdgpu_dm_psr_disable_all(dm);
8886
8887                 dm_enable_per_frame_crtc_master_sync(dc_state);
8888                 mutex_lock(&dm->dc_lock);
8889                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8890 #if defined(CONFIG_DRM_AMD_DC_DCN)
8891                 /* Allow idle optimization when vblank count is 0 for display off */
8892                 if (dm->active_vblank_irq_count == 0)
8893                         dc_allow_idle_optimizations(dm->dc, true);
8894 #endif
8895                 mutex_unlock(&dm->dc_lock);
8896         }
8897
8898         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8899                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8900
8901                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8902
8903                 if (dm_new_crtc_state->stream != NULL) {
8904                         const struct dc_stream_status *status =
8905                                         dc_stream_get_status(dm_new_crtc_state->stream);
8906
8907                         if (!status)
8908                                 status = dc_stream_get_status_from_state(dc_state,
8909                                                                          dm_new_crtc_state->stream);
8910                         if (!status)
8911                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8912                         else
8913                                 acrtc->otg_inst = status->primary_otg_inst;
8914                 }
8915         }
8916 #ifdef CONFIG_DRM_AMD_DC_HDCP
8917         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8918                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8919                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8920                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8921
8922                 new_crtc_state = NULL;
8923
8924                 if (acrtc)
8925                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8926
8927                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8928
8929                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8930                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8931                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8932                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8933                         dm_new_con_state->update_hdcp = true;
8934                         continue;
8935                 }
8936
8937                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8938                         hdcp_update_display(
8939                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8940                                 new_con_state->hdcp_content_type,
8941                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8942         }
8943 #endif
8944
8945         /* Handle connector state changes */
8946         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8947                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8948                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8949                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8950                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8951                 struct dc_stream_update stream_update;
8952                 struct dc_info_packet hdr_packet;
8953                 struct dc_stream_status *status = NULL;
8954                 bool abm_changed, hdr_changed, scaling_changed;
8955
8956                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8957                 memset(&stream_update, 0, sizeof(stream_update));
8958
8959                 if (acrtc) {
8960                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8961                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8962                 }
8963
8964                 /* Skip any modesets/resets */
8965                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8966                         continue;
8967
8968                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8969                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8970
8971                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8972                                                              dm_old_con_state);
8973
8974                 abm_changed = dm_new_crtc_state->abm_level !=
8975                               dm_old_crtc_state->abm_level;
8976
8977                 hdr_changed =
8978                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8979
8980                 if (!scaling_changed && !abm_changed && !hdr_changed)
8981                         continue;
8982
8983                 stream_update.stream = dm_new_crtc_state->stream;
8984                 if (scaling_changed) {
8985                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8986                                         dm_new_con_state, dm_new_crtc_state->stream);
8987
8988                         stream_update.src = dm_new_crtc_state->stream->src;
8989                         stream_update.dst = dm_new_crtc_state->stream->dst;
8990                 }
8991
8992                 if (abm_changed) {
8993                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8994
8995                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8996                 }
8997
8998                 if (hdr_changed) {
8999                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9000                         stream_update.hdr_static_metadata = &hdr_packet;
9001                 }
9002
9003                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9004                 if (WARN_ON(!status) || WARN_ON(!status->plane_count))
9005                         continue;
9006
9007                 /*
9008                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9009                  * Here we create an empty update on each plane.
9010                  * To fix this, DC should permit updating only stream properties.
9011                  */
9012                 for (j = 0; j < status->plane_count; j++)
9013                         dummy_updates[j].surface = status->plane_states[0];
9014
9015
9016                 mutex_lock(&dm->dc_lock);
9017                 dc_commit_updates_for_stream(dm->dc,
9018                                                      dummy_updates,
9019                                                      status->plane_count,
9020                                                      dm_new_crtc_state->stream,
9021                                                      &stream_update,
9022                                                      dc_state);
9023                 mutex_unlock(&dm->dc_lock);
9024         }
9025
9026         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9027         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9028                                       new_crtc_state, i) {
9029                 if (old_crtc_state->active && !new_crtc_state->active)
9030                         crtc_disable_count++;
9031
9032                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9033                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9034
9035                 /* For freesync config update on crtc state and params for irq */
9036                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9037
9038                 /* Handle vrr on->off / off->on transitions */
9039                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9040                                                 dm_new_crtc_state);
9041         }
9042
9043         /*
9044          * Enable interrupts for CRTCs that are newly enabled or went through
9045          * a modeset. This is intentionally deferred until after the front end
9046          * state has been modified, so that the OTG is already on and the IRQ
9047          * handlers never access stale or invalid state.
9048          */
9049         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9050                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9051 #ifdef CONFIG_DEBUG_FS
9052                 bool configure_crc = false;
9053                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9054 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9055                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9056 #endif
9057                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9058                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9059                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9060 #endif
9061                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9062
9063                 if (new_crtc_state->active &&
9064                     (!old_crtc_state->active ||
9065                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9066                         dc_stream_retain(dm_new_crtc_state->stream);
9067                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9068                         manage_dm_interrupts(adev, acrtc, true);
9069
9070 #ifdef CONFIG_DEBUG_FS
9071                         /*
9072                          * Frontend may have changed so reapply the CRC capture
9073                          * settings for the stream.
9074                          */
9075                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9076
9077                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9078                                 configure_crc = true;
9079 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9080                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9081                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9082                                         acrtc->dm_irq_params.crc_window.update_win = true;
9083                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9084                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9085                                         crc_rd_wrk->crtc = crtc;
9086                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9087                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9088                                 }
9089 #endif
9090                         }
9091
9092                         if (configure_crc)
9093                                 if (amdgpu_dm_crtc_configure_crc_source(
9094                                         crtc, dm_new_crtc_state, cur_crc_src))
9095                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9096 #endif
9097                 }
9098         }
9099
9100         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9101                 if (new_crtc_state->async_flip)
9102                         wait_for_vblank = false;
9103
9104         /* update planes when needed per crtc */
9105         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9106                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9107
9108                 if (dm_new_crtc_state->stream)
9109                         amdgpu_dm_commit_planes(state, dc_state, dev,
9110                                                 dm, crtc, wait_for_vblank);
9111         }
9112
9113         /* Update audio instances for each connector. */
9114         amdgpu_dm_commit_audio(dev, state);
9115
9116 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9117         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9118         /* restore the backlight level */
9119         if (dm->backlight_dev)
9120                 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9121 #endif
9122         /*
9123          * send vblank event on all events not handled in flip and
9124          * mark consumed event for drm_atomic_helper_commit_hw_done
9125          */
9126         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9127         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9128
9129                 if (new_crtc_state->event)
9130                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9131
9132                 new_crtc_state->event = NULL;
9133         }
9134         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9135
9136         /* Signal HW programming completion */
9137         drm_atomic_helper_commit_hw_done(state);
9138
9139         if (wait_for_vblank)
9140                 drm_atomic_helper_wait_for_flip_done(dev, state);
9141
9142         drm_atomic_helper_cleanup_planes(dev, state);
9143
9144         /* return the stolen vga memory back to VRAM */
9145         if (!adev->mman.keep_stolen_vga_memory)
9146                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9147         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9148
9149         /*
9150          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9151          * so we can put the GPU into runtime suspend if we're not driving any
9152          * displays anymore
9153          */
9154         for (i = 0; i < crtc_disable_count; i++)
9155                 pm_runtime_put_autosuspend(dev->dev);
9156         pm_runtime_mark_last_busy(dev->dev);
9157
9158         if (dc_state_temp)
9159                 dc_release_state(dc_state_temp);
9160 }
9161
9162
9163 static int dm_force_atomic_commit(struct drm_connector *connector)
9164 {
9165         int ret = 0;
9166         struct drm_device *ddev = connector->dev;
9167         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9168         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9169         struct drm_plane *plane = disconnected_acrtc->base.primary;
9170         struct drm_connector_state *conn_state;
9171         struct drm_crtc_state *crtc_state;
9172         struct drm_plane_state *plane_state;
9173
9174         if (!state)
9175                 return -ENOMEM;
9176
9177         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9178
9179         /* Construct an atomic state to restore previous display setting */
9180
9181         /*
9182          * Attach connectors to drm_atomic_state
9183          */
9184         conn_state = drm_atomic_get_connector_state(state, connector);
9185
9186         ret = PTR_ERR_OR_ZERO(conn_state);
9187         if (ret)
9188                 goto out;
9189
9190         /* Attach crtc to drm_atomic_state */
9191         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9192
9193         ret = PTR_ERR_OR_ZERO(crtc_state);
9194         if (ret)
9195                 goto out;
9196
9197         /* force a restore */
9198         crtc_state->mode_changed = true;
9199
9200         /* Attach plane to drm_atomic_state */
9201         plane_state = drm_atomic_get_plane_state(state, plane);
9202
9203         ret = PTR_ERR_OR_ZERO(plane_state);
9204         if (ret)
9205                 goto out;
9206
9207         /* Call commit internally with the state we just constructed */
9208         ret = drm_atomic_commit(state);
9209
9210 out:
9211         drm_atomic_state_put(state);
9212         if (ret)
9213                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9214
9215         return ret;
9216 }
9217
9218 /*
9219  * This function handles all the cases where a mode set does not come on hotplug.
9220  * This includes a display being unplugged and then plugged back into the
9221  * same port, and running without usermode desktop manager support.
9222  */
9223 void dm_restore_drm_connector_state(struct drm_device *dev,
9224                                     struct drm_connector *connector)
9225 {
9226         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9227         struct amdgpu_crtc *disconnected_acrtc;
9228         struct dm_crtc_state *acrtc_state;
9229
9230         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9231                 return;
9232
9233         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9234         if (!disconnected_acrtc)
9235                 return;
9236
9237         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9238         if (!acrtc_state->stream)
9239                 return;
9240
9241         /*
9242          * If the previous sink is not released and different from the current,
9243          * we conclude that we cannot rely on a usermode call
9244          * to turn the display back on, so we do it here.
9245          */
9246         if (acrtc_state->stream->sink != aconnector->dc_sink)
9247                 dm_force_atomic_commit(&aconnector->base);
9248 }
9249
9250 /*
9251  * Grabs all modesetting locks to serialize against any blocking commits,
9252  * and waits for completion of all non-blocking commits.
9253  */
9254 static int do_aquire_global_lock(struct drm_device *dev,
9255                                  struct drm_atomic_state *state)
9256 {
9257         struct drm_crtc *crtc;
9258         struct drm_crtc_commit *commit;
9259         long ret;
9260
9261         /*
9262          * Adding all modeset locks to acquire_ctx will
9263          * ensure that when the framework releases it, the
9264          * extra locks we are taking here will get released too.
9265          */
9266         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9267         if (ret)
9268                 return ret;
9269
9270         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9271                 spin_lock(&crtc->commit_lock);
9272                 commit = list_first_entry_or_null(&crtc->commit_list,
9273                                 struct drm_crtc_commit, commit_entry);
9274                 if (commit)
9275                         drm_crtc_commit_get(commit);
9276                 spin_unlock(&crtc->commit_lock);
9277
9278                 if (!commit)
9279                         continue;
9280
9281                 /*
9282                  * Make sure all pending HW programming has completed and
9283                  * all page flips are done
9284                  */
9285                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9286
9287                 if (ret > 0)
9288                         ret = wait_for_completion_interruptible_timeout(
9289                                         &commit->flip_done, 10*HZ);
9290
9291                 if (ret == 0)
9292                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9293                                   crtc->base.id, crtc->name);
9294
9295                 drm_crtc_commit_put(commit);
9296         }
9297
9298         return ret < 0 ? ret : 0;
9299 }
9300
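/*
 * DC expresses refresh rates in micro-Hz (uhz), hence the 1,000,000 scaling
 * of the connector's min/max_vfreq below. Illustrative arithmetic with
 * hypothetical values (not taken from this driver): a 48-144 Hz VRR panel
 * would yield
 *
 *   config.min_refresh_in_uhz =  48 * 1000000 =  48,000,000
 *   config.max_refresh_in_uhz = 144 * 1000000 = 144,000,000
 */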
9301 static void get_freesync_config_for_crtc(
9302         struct dm_crtc_state *new_crtc_state,
9303         struct dm_connector_state *new_con_state)
9304 {
9305         struct mod_freesync_config config = {0};
9306         struct amdgpu_dm_connector *aconnector =
9307                         to_amdgpu_dm_connector(new_con_state->base.connector);
9308         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9309         int vrefresh = drm_mode_vrefresh(mode);
9310         bool fs_vid_mode = false;
9311
9312         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9313                                         vrefresh >= aconnector->min_vfreq &&
9314                                         vrefresh <= aconnector->max_vfreq;
9315
9316         if (new_crtc_state->vrr_supported) {
9317                 new_crtc_state->stream->ignore_msa_timing_param = true;
9318                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9319
9320                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9321                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9322                 config.vsif_supported = true;
9323                 config.btr = true;
9324
9325                 if (fs_vid_mode) {
9326                         config.state = VRR_STATE_ACTIVE_FIXED;
9327                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9328                         goto out;
9329                 } else if (new_crtc_state->base.vrr_enabled) {
9330                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9331                 } else {
9332                         config.state = VRR_STATE_INACTIVE;
9333                 }
9334         }
9335 out:
9336         new_crtc_state->freesync_config = config;
9337 }
9338
9339 static void reset_freesync_config_for_crtc(
9340         struct dm_crtc_state *new_crtc_state)
9341 {
9342         new_crtc_state->vrr_supported = false;
9343
9344         memset(&new_crtc_state->vrr_infopacket, 0,
9345                sizeof(new_crtc_state->vrr_infopacket));
9346 }
9347
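/*
 * Note the deliberate mix of == and != below: two modes count as "unchanged
 * for freesync" when the pixel clock and all horizontal parameters match
 * exactly while vtotal, vsync_start and vsync_end differ, with the vsync
 * pulse width (vsync_end - vsync_start) kept identical. That is the
 * signature of a freesync video mode switch that only moves the vertical
 * front porch, which can be applied without a full modeset.
 */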
9348 static bool
9349 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9350                                  struct drm_crtc_state *new_crtc_state)
9351 {
9352         struct drm_display_mode old_mode, new_mode;
9353
9354         if (!old_crtc_state || !new_crtc_state)
9355                 return false;
9356
9357         old_mode = old_crtc_state->mode;
9358         new_mode = new_crtc_state->mode;
9359
9360         if (old_mode.clock       == new_mode.clock &&
9361             old_mode.hdisplay    == new_mode.hdisplay &&
9362             old_mode.vdisplay    == new_mode.vdisplay &&
9363             old_mode.htotal      == new_mode.htotal &&
9364             old_mode.vtotal      != new_mode.vtotal &&
9365             old_mode.hsync_start == new_mode.hsync_start &&
9366             old_mode.vsync_start != new_mode.vsync_start &&
9367             old_mode.hsync_end   == new_mode.hsync_end &&
9368             old_mode.vsync_end   != new_mode.vsync_end &&
9369             old_mode.hskew       == new_mode.hskew &&
9370             old_mode.vscan       == new_mode.vscan &&
9371             (old_mode.vsync_end - old_mode.vsync_start) ==
9372             (new_mode.vsync_end - new_mode.vsync_start))
9373                 return true;
9374
9375         return false;
9376 }
9377
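/*
 * The fixed refresh rate below is derived as
 *
 *   fixed_refresh_in_uhz = clock_in_khz * 1000 * 1000000 / (htotal * vtotal)
 *
 * A sketch with illustrative numbers (a nominal 1080p60-style mode, not
 * taken from this file): clock = 148500 kHz, htotal = 2200, vtotal = 1125
 * gives 148500000 * 1000000 / 2475000 = 60,000,000 uhz, i.e. 60 Hz.
 */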
9378 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9378 {
9379         uint64_t num, den, res;
9380         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9381
9382         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9383
9384         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9385         den = (unsigned long long)new_crtc_state->mode.htotal *
9386               (unsigned long long)new_crtc_state->mode.vtotal;
9387
9388         res = div_u64(num, den);
9389         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9390 }
9391
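/*
 * atomic_check drives every CRTC through this function twice: a first pass
 * with enable == false removes the stream of any changed/disabled CRTC from
 * the DC context, and a second pass with enable == true creates and
 * validates a stream for any enabled CRTC. Whenever a stream is added or
 * removed, *lock_and_validation_needed is set so that the caller performs
 * DC global validation under the global modeset lock.
 */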
9392 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9393                                 struct drm_atomic_state *state,
9394                                 struct drm_crtc *crtc,
9395                                 struct drm_crtc_state *old_crtc_state,
9396                                 struct drm_crtc_state *new_crtc_state,
9397                                 bool enable,
9398                                 bool *lock_and_validation_needed)
9399 {
9400         struct dm_atomic_state *dm_state = NULL;
9401         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9402         struct dc_stream_state *new_stream;
9403         int ret = 0;
9404
9405         /*
9406          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9407          * update changed items
9408          */
9409         struct amdgpu_crtc *acrtc = NULL;
9410         struct amdgpu_dm_connector *aconnector = NULL;
9411         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9412         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9413
9414         new_stream = NULL;
9415
9416         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9417         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9418         acrtc = to_amdgpu_crtc(crtc);
9419         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9420
9421         /* TODO This hack should go away */
9422         if (aconnector && enable) {
9423                 /* Make sure fake sink is created in plug-in scenario */
9424                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9425                                                             &aconnector->base);
9426                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9427                                                             &aconnector->base);
9428
9429                 if (IS_ERR(drm_new_conn_state)) {
9430                         ret = PTR_ERR(drm_new_conn_state);
9431                         goto fail;
9432                 }
9433
9434                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9435                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9436
9437                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9438                         goto skip_modeset;
9439
9440                 new_stream = create_validate_stream_for_sink(aconnector,
9441                                                              &new_crtc_state->mode,
9442                                                              dm_new_conn_state,
9443                                                              dm_old_crtc_state->stream);
9444
9445                 /*
9446                  * We can have no stream on ACTION_SET if a display
9447                  * was disconnected during S3. In this case it is not an
9448                  * error: the OS will be updated after detection and
9449                  * will do the right thing on the next atomic commit.
9450                  */
9451
9452                 if (!new_stream) {
9453                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9454                                         __func__, acrtc->base.base.id);
9455                         ret = -ENOMEM;
9456                         goto fail;
9457                 }
9458
9459                 /*
9460                  * TODO: Check VSDB bits to decide whether this should
9461                  * be enabled or not.
9462                  */
9463                 new_stream->triggered_crtc_reset.enabled =
9464                         dm->force_timing_sync;
9465
9466                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9467
9468                 ret = fill_hdr_info_packet(drm_new_conn_state,
9469                                            &new_stream->hdr_static_metadata);
9470                 if (ret)
9471                         goto fail;
9472
9473                 /*
9474                  * If we already removed the old stream from the context
9475                  * (and set the new stream to NULL) then we can't reuse
9476                  * the old stream even if the stream and scaling are unchanged.
9477                  * We'll hit the BUG_ON and black screen.
9478                  *
9479                  * TODO: Refactor this function to allow this check to work
9480                  * in all conditions.
9481                  */
9482                 if (amdgpu_freesync_vid_mode &&
9483                     dm_new_crtc_state->stream &&
9484                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9485                         goto skip_modeset;
9486
9487                 if (dm_new_crtc_state->stream &&
9488                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9489                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9490                         new_crtc_state->mode_changed = false;
9491                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9492                                          new_crtc_state->mode_changed);
9493                 }
9494         }
9495
9496         /* mode_changed flag may get updated above, need to check again */
9497         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9498                 goto skip_modeset;
9499
9500         DRM_DEBUG_ATOMIC(
9501                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9502                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9503                 "connectors_changed:%d\n",
9504                 acrtc->crtc_id,
9505                 new_crtc_state->enable,
9506                 new_crtc_state->active,
9507                 new_crtc_state->planes_changed,
9508                 new_crtc_state->mode_changed,
9509                 new_crtc_state->active_changed,
9510                 new_crtc_state->connectors_changed);
9511
9512         /* Remove stream for any changed/disabled CRTC */
9513         if (!enable) {
9514
9515                 if (!dm_old_crtc_state->stream)
9516                         goto skip_modeset;
9517
9518                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9519                     is_timing_unchanged_for_freesync(new_crtc_state,
9520                                                      old_crtc_state)) {
9521                         new_crtc_state->mode_changed = false;
9522                         DRM_DEBUG_DRIVER(
9523                                 "Mode change not required for front porch change, "
9524                                 "setting mode_changed to %d\n",
9525                                 new_crtc_state->mode_changed);
9526
9527                         set_freesync_fixed_config(dm_new_crtc_state);
9528
9529                         goto skip_modeset;
9530                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9531                            is_freesync_video_mode(&new_crtc_state->mode,
9532                                                   aconnector)) {
9533                         set_freesync_fixed_config(dm_new_crtc_state);
9534                 }
9535
9536                 ret = dm_atomic_get_state(state, &dm_state);
9537                 if (ret)
9538                         goto fail;
9539
9540                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9541                                 crtc->base.id);
9542
9543                 /* i.e. reset mode */
9544                 if (dc_remove_stream_from_ctx(
9545                                 dm->dc,
9546                                 dm_state->context,
9547                                 dm_old_crtc_state->stream) != DC_OK) {
9548                         ret = -EINVAL;
9549                         goto fail;
9550                 }
9551
9552                 dc_stream_release(dm_old_crtc_state->stream);
9553                 dm_new_crtc_state->stream = NULL;
9554
9555                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9556
9557                 *lock_and_validation_needed = true;
9558
9559         } else {/* Add stream for any updated/enabled CRTC */
9560                 /*
9561                  * Quick fix to prevent a NULL pointer dereference on new_stream
9562                  * when added MST connectors are not found in the existing crtc_state
9563                  * in chained mode. TODO: dig out the root cause of this.
9564                  */
9565                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9566                         goto skip_modeset;
9567
9568                 if (modereset_required(new_crtc_state))
9569                         goto skip_modeset;
9570
9571                 if (modeset_required(new_crtc_state, new_stream,
9572                                      dm_old_crtc_state->stream)) {
9573
9574                         WARN_ON(dm_new_crtc_state->stream);
9575
9576                         ret = dm_atomic_get_state(state, &dm_state);
9577                         if (ret)
9578                                 goto fail;
9579
9580                         dm_new_crtc_state->stream = new_stream;
9581
9582                         dc_stream_retain(new_stream);
9583
9584                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9585                                          crtc->base.id);
9586
9587                         if (dc_add_stream_to_ctx(
9588                                         dm->dc,
9589                                         dm_state->context,
9590                                         dm_new_crtc_state->stream) != DC_OK) {
9591                                 ret = -EINVAL;
9592                                 goto fail;
9593                         }
9594
9595                         *lock_and_validation_needed = true;
9596                 }
9597         }
9598
9599 skip_modeset:
9600         /* Release extra reference */
9601         if (new_stream)
9602                 dc_stream_release(new_stream);
9603
9604         /*
9605          * We want to do dc stream updates that do not require a
9606          * full modeset below.
9607          */
9608         if (!(enable && aconnector && new_crtc_state->active))
9609                 return 0;
9610         /*
9611          * Given the above conditions, the dc state cannot be NULL because:
9612          * 1. We're in the process of enabling CRTCs (the stream has just been
9613          *    added to the dc context, or is already on the context),
9614          * 2. it has a valid connector attached, and
9615          * 3. it is currently active and enabled.
9616          * => The dc stream state currently exists.
9617          */
9618         BUG_ON(dm_new_crtc_state->stream == NULL);
9619
9620         /* Scaling or underscan settings */
9621         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9622                 update_stream_scaling_settings(
9623                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9624
9625         /* ABM settings */
9626         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9627
9628         /*
9629          * Color management settings. We also update color properties
9630          * when a modeset is needed, to ensure it gets reprogrammed.
9631          */
9632         if (dm_new_crtc_state->base.color_mgmt_changed ||
9633             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9634                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9635                 if (ret)
9636                         goto fail;
9637         }
9638
9639         /* Update Freesync settings. */
9640         get_freesync_config_for_crtc(dm_new_crtc_state,
9641                                      dm_new_conn_state);
9642
9643         return ret;
9644
9645 fail:
9646         if (new_stream)
9647                 dc_stream_release(new_stream);
9648         return ret;
9649 }
9650
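/*
 * Decide whether a plane update can be applied to the existing DC plane
 * state or whether the plane has to be torn down and recreated in the DC
 * context. Anything that can affect pipe allocation or bandwidth (z-order,
 * scaling, rotation, blending, pixel format, tiling/DCC) forces a reset.
 */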
9651 static bool should_reset_plane(struct drm_atomic_state *state,
9652                                struct drm_plane *plane,
9653                                struct drm_plane_state *old_plane_state,
9654                                struct drm_plane_state *new_plane_state)
9655 {
9656         struct drm_plane *other;
9657         struct drm_plane_state *old_other_state, *new_other_state;
9658         struct drm_crtc_state *new_crtc_state;
9659         int i;
9660
9661         /*
9662          * TODO: Remove this hack once the checks below are sufficient
9663          * to determine when we need to reset all the planes on
9664          * the stream.
9665          */
9666         if (state->allow_modeset)
9667                 return true;
9668
9669         /* Exit early if we know that we're adding or removing the plane. */
9670         if (old_plane_state->crtc != new_plane_state->crtc)
9671                 return true;
9672
9673         /* old crtc == new_crtc == NULL, plane not in context. */
9674         if (!new_plane_state->crtc)
9675                 return false;
9676
9677         new_crtc_state =
9678                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9679
9680         if (!new_crtc_state)
9681                 return true;
9682
9683         /* CRTC Degamma changes currently require us to recreate planes. */
9684         if (new_crtc_state->color_mgmt_changed)
9685                 return true;
9686
9687         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9688                 return true;
9689
9690         /*
9691          * If there are any new primary or overlay planes being added or
9692          * removed then the z-order can potentially change. To ensure
9693          * correct z-order and pipe acquisition the current DC architecture
9694          * requires us to remove and recreate all existing planes.
9695          *
9696          * TODO: Come up with a more elegant solution for this.
9697          */
9698         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9699                 struct amdgpu_framebuffer *old_afb, *new_afb;
9700                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9701                         continue;
9702
9703                 if (old_other_state->crtc != new_plane_state->crtc &&
9704                     new_other_state->crtc != new_plane_state->crtc)
9705                         continue;
9706
9707                 if (old_other_state->crtc != new_other_state->crtc)
9708                         return true;
9709
9710                 /* Src/dst size and scaling updates. */
9711                 if (old_other_state->src_w != new_other_state->src_w ||
9712                     old_other_state->src_h != new_other_state->src_h ||
9713                     old_other_state->crtc_w != new_other_state->crtc_w ||
9714                     old_other_state->crtc_h != new_other_state->crtc_h)
9715                         return true;
9716
9717                 /* Rotation / mirroring updates. */
9718                 if (old_other_state->rotation != new_other_state->rotation)
9719                         return true;
9720
9721                 /* Blending updates. */
9722                 if (old_other_state->pixel_blend_mode !=
9723                     new_other_state->pixel_blend_mode)
9724                         return true;
9725
9726                 /* Alpha updates. */
9727                 if (old_other_state->alpha != new_other_state->alpha)
9728                         return true;
9729
9730                 /* Colorspace changes. */
9731                 if (old_other_state->color_range != new_other_state->color_range ||
9732                     old_other_state->color_encoding != new_other_state->color_encoding)
9733                         return true;
9734
9735                 /* Framebuffer checks fall at the end. */
9736                 if (!old_other_state->fb || !new_other_state->fb)
9737                         continue;
9738
9739                 /* Pixel format changes can require bandwidth updates. */
9740                 if (old_other_state->fb->format != new_other_state->fb->format)
9741                         return true;
9742
9743                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9744                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9745
9746                 /* Tiling and DCC changes also require bandwidth updates. */
9747                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9748                     old_afb->base.modifier != new_afb->base.modifier)
9749                         return true;
9750         }
9751
9752         return false;
9753 }
9754
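/*
 * Cursor FBs must be unscaled, uncropped, linear, and have a pitch of
 * exactly 64, 128 or 256 pixels. Worked example with hypothetical values
 * (not from this file): a 64x64 ARGB8888 cursor has pitches[0] = 256 bytes
 * and cpp[0] = 4, so pitch = 256 / 4 = 64 pixels, which equals fb->width
 * and is an allowed value.
 */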
9755 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9756                               struct drm_plane_state *new_plane_state,
9757                               struct drm_framebuffer *fb)
9758 {
9759         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9760         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9761         unsigned int pitch;
9762         bool linear;
9763
9764         if (fb->width > new_acrtc->max_cursor_width ||
9765             fb->height > new_acrtc->max_cursor_height) {
9766                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9767                                  fb->width,
9768                                  fb->height);
9769                 return -EINVAL;
9770         }
9771         if (new_plane_state->src_w != fb->width << 16 ||
9772             new_plane_state->src_h != fb->height << 16) {
9773                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9774                 return -EINVAL;
9775         }
9776
9777         /* Pitch in pixels */
9778         pitch = fb->pitches[0] / fb->format->cpp[0];
9779
9780         if (fb->width != pitch) {
9781                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9782                                  fb->width, pitch);
9783                 return -EINVAL;
9784         }
9785
9786         switch (pitch) {
9787         case 64:
9788         case 128:
9789         case 256:
9790                 /* FB pitch is supported by cursor plane */
9791                 break;
9792         default:
9793                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9794                 return -EINVAL;
9795         }
9796
9797         /* Core DRM takes care of checking FB modifiers, so we only need to
9798          * check tiling flags when the FB doesn't have a modifier. */
9799         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9800                 if (adev->family < AMDGPU_FAMILY_AI) {
9801                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9802                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9803                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9804                 } else {
9805                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9806                 }
9807                 if (!linear) {
9808                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9809                         return -EINVAL;
9810                 }
9811         }
9812
9813         return 0;
9814 }
9815
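/*
 * Plane-level counterpart of dm_update_crtc_state(): likewise called in two
 * passes from atomic_check, first with enable == false to remove
 * changed/removed planes from the DC context, then with enable == true to
 * add new or reset planes back in. Cursor planes are only sanity-checked
 * here; they are handled per CRTC rather than as DC planes.
 */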
9816 static int dm_update_plane_state(struct dc *dc,
9817                                  struct drm_atomic_state *state,
9818                                  struct drm_plane *plane,
9819                                  struct drm_plane_state *old_plane_state,
9820                                  struct drm_plane_state *new_plane_state,
9821                                  bool enable,
9822                                  bool *lock_and_validation_needed)
9823 {
9824
9825         struct dm_atomic_state *dm_state = NULL;
9826         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9827         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9828         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9829         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9830         struct amdgpu_crtc *new_acrtc;
9831         bool needs_reset;
9832         int ret = 0;
9833
9834
9835         new_plane_crtc = new_plane_state->crtc;
9836         old_plane_crtc = old_plane_state->crtc;
9837         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9838         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9839
9840         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9841                 if (!enable || !new_plane_crtc ||
9842                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9843                         return 0;
9844
9845                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9846
9847                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9848                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9849                         return -EINVAL;
9850                 }
9851
9852                 if (new_plane_state->fb) {
9853                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9854                                                  new_plane_state->fb);
9855                         if (ret)
9856                                 return ret;
9857                 }
9858
9859                 return 0;
9860         }
9861
9862         needs_reset = should_reset_plane(state, plane, old_plane_state,
9863                                          new_plane_state);
9864
9865         /* Remove any changed/removed planes */
9866         if (!enable) {
9867                 if (!needs_reset)
9868                         return 0;
9869
9870                 if (!old_plane_crtc)
9871                         return 0;
9872
9873                 old_crtc_state = drm_atomic_get_old_crtc_state(
9874                                 state, old_plane_crtc);
9875                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9876
9877                 if (!dm_old_crtc_state->stream)
9878                         return 0;
9879
9880                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9881                                 plane->base.id, old_plane_crtc->base.id);
9882
9883                 ret = dm_atomic_get_state(state, &dm_state);
9884                 if (ret)
9885                         return ret;
9886
9887                 if (!dc_remove_plane_from_context(
9888                                 dc,
9889                                 dm_old_crtc_state->stream,
9890                                 dm_old_plane_state->dc_state,
9891                                 dm_state->context)) {
9893                         return -EINVAL;
9894                 }
9895
9897                 dc_plane_state_release(dm_old_plane_state->dc_state);
9898                 dm_new_plane_state->dc_state = NULL;
9899
9900                 *lock_and_validation_needed = true;
9901
9902         } else { /* Add new planes */
9903                 struct dc_plane_state *dc_new_plane_state;
9904
9905                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9906                         return 0;
9907
9908                 if (!new_plane_crtc)
9909                         return 0;
9910
9911                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9912                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9913
9914                 if (!dm_new_crtc_state->stream)
9915                         return 0;
9916
9917                 if (!needs_reset)
9918                         return 0;
9919
9920                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9921                 if (ret)
9922                         return ret;
9923
9924                 WARN_ON(dm_new_plane_state->dc_state);
9925
9926                 dc_new_plane_state = dc_create_plane_state(dc);
9927                 if (!dc_new_plane_state)
9928                         return -ENOMEM;
9929
9930                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9931                                  plane->base.id, new_plane_crtc->base.id);
9932
9933                 ret = fill_dc_plane_attributes(
9934                         drm_to_adev(new_plane_crtc->dev),
9935                         dc_new_plane_state,
9936                         new_plane_state,
9937                         new_crtc_state);
9938                 if (ret) {
9939                         dc_plane_state_release(dc_new_plane_state);
9940                         return ret;
9941                 }
9942
9943                 ret = dm_atomic_get_state(state, &dm_state);
9944                 if (ret) {
9945                         dc_plane_state_release(dc_new_plane_state);
9946                         return ret;
9947                 }
9948
9949                 /*
9950                  * Any atomic check errors that occur after this will
9951                  * not need a release. The plane state will be attached
9952                  * to the stream, and therefore part of the atomic
9953                  * state. It'll be released when the atomic state is
9954                  * cleaned.
9955                  */
9956                 if (!dc_add_plane_to_context(
9957                                 dc,
9958                                 dm_new_crtc_state->stream,
9959                                 dc_new_plane_state,
9960                                 dm_state->context)) {
9962                         dc_plane_state_release(dc_new_plane_state);
9963                         return -EINVAL;
9964                 }
9965
9966                 dm_new_plane_state->dc_state = dc_new_plane_state;
9967
9968                 /* Tell DC to do a full surface update every time there
9969                  * is a plane change. Inefficient, but works for now.
9970                  */
9971                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9972
9973                 *lock_and_validation_needed = true;
9974         }
9975
9977         return ret;
9978 }
9979
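/*
 * The scales below are in units of 1/1000th: CRTC size over the 16.16
 * fixed-point source size. Illustrative arithmetic with hypothetical values
 * (not from this file): a primary plane scanning a 1920-wide source
 * (src_w = 1920 << 16) onto a 3840-wide CRTC gives 3840 * 1000 / 1920 =
 * 2000 (2x), while a cursor drawn 1:1 gives 1000 and would therefore be
 * rejected on such a CRTC.
 */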
9980 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9981                                 struct drm_crtc *crtc,
9982                                 struct drm_crtc_state *new_crtc_state)
9983 {
9984         struct drm_plane_state *new_cursor_state, *new_primary_state;
9985         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9986
9987         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9988          * cursor per pipe but it's going to inherit the scaling and
9989          * positioning from the underlying pipe. Check that the cursor plane's
9990          * blending properties match the primary plane's. */
9991
9992         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9993         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9994         if (!new_cursor_state || !new_primary_state ||
9995             !new_cursor_state->fb || !new_primary_state->fb) {
9996                 return 0;
9997         }
9998
9999         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10000                          (new_cursor_state->src_w >> 16);
10001         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10002                          (new_cursor_state->src_h >> 16);
10003
10004         primary_scale_w = new_primary_state->crtc_w * 1000 /
10005                          (new_primary_state->src_w >> 16);
10006         primary_scale_h = new_primary_state->crtc_h * 1000 /
10007                          (new_primary_state->src_h >> 16);
10008
10009         if (cursor_scale_w != primary_scale_w ||
10010             cursor_scale_h != primary_scale_h) {
10011                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
10012                 return -EINVAL;
10013         }
10014
10015         return 0;
10016 }
10017
10018 #if defined(CONFIG_DRM_AMD_DC_DCN)
10019 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10020 {
10021         struct drm_connector *connector;
10022         struct drm_connector_state *conn_state;
10023         struct amdgpu_dm_connector *aconnector = NULL;
10024         int i;
10025         for_each_new_connector_in_state(state, connector, conn_state, i) {
10026                 if (conn_state->crtc != crtc)
10027                         continue;
10028
10029                 aconnector = to_amdgpu_dm_connector(connector);
10030                 if (!aconnector->port || !aconnector->mst_port)
10031                         aconnector = NULL;
10032                 else
10033                         break;
10034         }
10035
10036         if (!aconnector)
10037                 return 0;
10038
10039         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10040 }
10041 #endif
10042
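/*
 * The hardware cursor inherits scaling and positioning from the top-most
 * enabled pipe (see dm_check_crtc_cursor() above), so an overlay that only
 * partially covers the primary would leave the cursor mis-rendered over the
 * uncovered region. Such configurations are rejected here; userspace is
 * then expected to fall back to composing the cursor in software.
 */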
10043 static int validate_overlay(struct drm_atomic_state *state)
10044 {
10045         int i;
10046         struct drm_plane *plane;
10047         struct drm_plane_state *old_plane_state, *new_plane_state;
10048         struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10049
10050         /* Find the overlay plane's new state; nothing to check if it is being disabled */
10051         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10052                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10053                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10054                                 return 0;
10055
10056                         overlay_state = new_plane_state;
10057                         continue;
10058                 }
10059         }
10060
10061         /* check if we're making changes to the overlay plane */
10062         if (!overlay_state)
10063                 return 0;
10064
10065         /* check if overlay plane is enabled */
10066         if (!overlay_state->crtc)
10067                 return 0;
10068
10069         /* find the primary plane for the CRTC that the overlay is enabled on */
10070         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10071         if (IS_ERR(primary_state))
10072                 return PTR_ERR(primary_state);
10073
10074         /* check if primary plane is enabled */
10075         if (!primary_state->crtc)
10076                 return 0;
10077
10078         /* skip the overlay check if the cursor plane is being disabled */
10079         cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10080         if (IS_ERR(cursor_state))
10081                 return PTR_ERR(cursor_state);
10082
10083         if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10084                 return 0;
10085
10086         /* Perform the bounds check to ensure the overlay plane covers the primary */
10087         if (primary_state->crtc_x < overlay_state->crtc_x ||
10088             primary_state->crtc_y < overlay_state->crtc_y ||
10089             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10090             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10091                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10092                 return -EINVAL;
10093         }
10094
10095         return 0;
10096 }
10097
10098 /**
10099  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10100  * @dev: The DRM device
10101  * @state: The atomic state to commit
10102  *
10103  * Validate that the given atomic state is programmable by DC into hardware.
10104  * This involves constructing a &struct dc_state reflecting the new hardware
10105  * state we wish to commit, then querying DC to see if it is programmable. It's
10106  * important not to modify the existing DC state. Otherwise, atomic_check
10107  * may unexpectedly commit hardware changes.
10108  *
10109  * When validating the DC state, it's important that the right locks are
10110  * acquired. For full updates case which removes/adds/updates streams on one
10111  * acquired. For the full update case, which removes/adds/updates streams on
10112  * one CRTC while flipping on another CRTC, acquiring the global lock
10113  * guarantees that any such full update commit will wait for completion of any
10114  * outstanding flip using DRM's synchronization events.
10115  * Note that DM adds the affected connectors for all CRTCs in state, when that
10116  * might not seem necessary. This is because DC stream creation requires the
10117  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10118  * be possible but non-trivial - a possible TODO item.
10119  *
10120  * Return: 0 on success, a negative error code on failure.
10121  */
10122 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10123                                   struct drm_atomic_state *state)
10124 {
10125         struct amdgpu_device *adev = drm_to_adev(dev);
10126         struct dm_atomic_state *dm_state = NULL;
10127         struct dc *dc = adev->dm.dc;
10128         struct drm_connector *connector;
10129         struct drm_connector_state *old_con_state, *new_con_state;
10130         struct drm_crtc *crtc;
10131         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10132         struct drm_plane *plane;
10133         struct drm_plane_state *old_plane_state, *new_plane_state;
10134         enum dc_status status;
10135         int ret, i;
10136         bool lock_and_validation_needed = false;
10137         struct dm_crtc_state *dm_old_crtc_state;
10138
10139         trace_amdgpu_dm_atomic_check_begin(state);
10140
10141         ret = drm_atomic_helper_check_modeset(dev, state);
10142         if (ret)
10143                 goto fail;
10144
10145         /* Check connector changes */
10146         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10147                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10148                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10149
10150                 /* Skip connectors that are disabled or part of modeset already. */
10151                 if (!old_con_state->crtc && !new_con_state->crtc)
10152                         continue;
10153
10154                 if (!new_con_state->crtc)
10155                         continue;
10156
10157                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10158                 if (IS_ERR(new_crtc_state)) {
10159                         ret = PTR_ERR(new_crtc_state);
10160                         goto fail;
10161                 }
10162
10163                 if (dm_old_con_state->abm_level !=
10164                     dm_new_con_state->abm_level)
10165                         new_crtc_state->connectors_changed = true;
10166         }
10167
10168 #if defined(CONFIG_DRM_AMD_DC_DCN)
10169         if (dc_resource_is_dsc_encoding_supported(dc)) {
10170                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10171                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10172                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10173                                 if (ret)
10174                                         goto fail;
10175                         }
10176                 }
10177         }
10178 #endif
10179         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10180                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10181
10182                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10183                     !new_crtc_state->color_mgmt_changed &&
10184                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10185                     !dm_old_crtc_state->dsc_force_changed)
10186                         continue;
10187
10188                 if (!new_crtc_state->enable)
10189                         continue;
10190
10191                 ret = drm_atomic_add_affected_connectors(state, crtc);
10192                 if (ret)
10193                         goto fail;
10194
10195                 ret = drm_atomic_add_affected_planes(state, crtc);
10196                 if (ret)
10197                         goto fail;
10198
10199                 if (dm_old_crtc_state->dsc_force_changed)
10200                         new_crtc_state->mode_changed = true;
10201         }
10202
10203         /*
10204          * Add all primary and overlay planes on the CRTC to the state
10205          * whenever a plane is enabled to maintain correct z-ordering
10206          * and to enable fast surface updates.
10207          */
10208         drm_for_each_crtc(crtc, dev) {
10209                 bool modified = false;
10210
10211                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10212                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10213                                 continue;
10214
10215                         if (new_plane_state->crtc == crtc ||
10216                             old_plane_state->crtc == crtc) {
10217                                 modified = true;
10218                                 break;
10219                         }
10220                 }
10221
10222                 if (!modified)
10223                         continue;
10224
10225                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10226                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10227                                 continue;
10228
10229                         new_plane_state =
10230                                 drm_atomic_get_plane_state(state, plane);
10231
10232                         if (IS_ERR(new_plane_state)) {
10233                                 ret = PTR_ERR(new_plane_state);
10234                                 goto fail;
10235                         }
10236                 }
10237         }
10238
10239         /* Remove existing planes if they are modified */
10240         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10241                 ret = dm_update_plane_state(dc, state, plane,
10242                                             old_plane_state,
10243                                             new_plane_state,
10244                                             false,
10245                                             &lock_and_validation_needed);
10246                 if (ret)
10247                         goto fail;
10248         }
10249
10250         /* Disable all crtcs which require disable */
10251         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10252                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10253                                            old_crtc_state,
10254                                            new_crtc_state,
10255                                            false,
10256                                            &lock_and_validation_needed);
10257                 if (ret)
10258                         goto fail;
10259         }
10260
10261         /* Enable all crtcs which require enable */
10262         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10263                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10264                                            old_crtc_state,
10265                                            new_crtc_state,
10266                                            true,
10267                                            &lock_and_validation_needed);
10268                 if (ret)
10269                         goto fail;
10270         }
10271
10272         ret = validate_overlay(state);
10273         if (ret)
10274                 goto fail;
10275
10276         /* Add new/modified planes */
10277         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10278                 ret = dm_update_plane_state(dc, state, plane,
10279                                             old_plane_state,
10280                                             new_plane_state,
10281                                             true,
10282                                             &lock_and_validation_needed);
10283                 if (ret)
10284                         goto fail;
10285         }
10286
10287         /* Run this here since we want to validate the streams we created */
10288         ret = drm_atomic_helper_check_planes(dev, state);
10289         if (ret)
10290                 goto fail;
10291
10292         /* Check cursor planes scaling */
10293         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10294                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10295                 if (ret)
10296                         goto fail;
10297         }
10298
10299         if (state->legacy_cursor_update) {
10300                 /*
10301                  * This is a fast cursor update coming from the plane update
10302                  * helper, check if it can be done asynchronously for better
10303                  * performance.
10304                  */
10305                 state->async_update =
10306                         !drm_atomic_helper_async_check(dev, state);
10307
10308                 /*
10309                  * Skip the remaining global validation if this is an async
10310                  * update. Cursor updates can be done without affecting
10311                  * state or bandwidth calcs and this avoids the performance
10312                  * penalty of locking the private state object and
10313                  * allocating a new dc_state.
10314                  */
10315                 if (state->async_update)
10316                         return 0;
10317         }
10318
10319         /* Check scaling and underscan changes */
10320         /* TODO Removed scaling changes validation due to inability to commit
10321          * new stream into context w/o causing full reset. Need to
10322          * decide how to handle.
10323          */
10324         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10325                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10326                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10327                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10328
10329                 /* Skip any modesets/resets */
10330                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10331                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10332                         continue;
10333
10334                 /* Skip anything that is not a scaling or underscan change */
10335                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10336                         continue;
10337
10338                 lock_and_validation_needed = true;
10339         }
10340
10341         /*
10342          * Streams and planes are reset when there are changes that affect
10343          * bandwidth. Anything that affects bandwidth needs to go through
10344          * DC global validation to ensure that the configuration can be applied
10345          * to hardware.
10346          *
10347          * We have to currently stall out here in atomic_check for outstanding
10348          * commits to finish in this case because our IRQ handlers reference
10349          * DRM state directly - we can end up disabling interrupts too early
10350          * if we don't.
10351          *
10352          * TODO: Remove this stall and drop DM state private objects.
10353          */
10354         if (lock_and_validation_needed) {
10355                 ret = dm_atomic_get_state(state, &dm_state);
10356                 if (ret)
10357                         goto fail;
10358
10359                 ret = do_acquire_global_lock(dev, state);
10360                 if (ret)
10361                         goto fail;
10362
10363 #if defined(CONFIG_DRM_AMD_DC_DCN)
10364                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10364                         ret = -EINVAL;
10365                         goto fail;
10365                 }
10366
10367                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10368                 if (ret)
10369                         goto fail;
10370 #endif
10371
10372                 /*
10373                  * Perform validation of MST topology in the state:
10374                  * We need to perform MST atomic check before calling
10375                  * dc_validate_global_state(), or there is a chance
10376                  * to get stuck in an infinite loop and hang eventually.
10377                  */
10378                 ret = drm_dp_mst_atomic_check(state);
10379                 if (ret)
10380                         goto fail;
10381                 status = dc_validate_global_state(dc, dm_state->context, false);
10382                 if (status != DC_OK) {
10383                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10384                                        dc_status_to_str(status), status);
10385                         ret = -EINVAL;
10386                         goto fail;
10387                 }
10388         } else {
10389                 /*
10390                  * The commit is a fast update. Fast updates shouldn't change
10391                  * the DC context, affect global validation, and can have their
10392                  * commit work done in parallel with other commits not touching
10393                  * the same resource. If we have a new DC context as part of
10394                  * the DM atomic state from validation we need to free it and
10395                  * retain the existing one instead.
10396                  *
10397                  * Furthermore, since the DM atomic state only contains the DC
10398                  * context and can safely be annulled, we can free the state
10399                  * and clear the associated private object now to free
10400                  * some memory and avoid a possible use-after-free later.
10401                  */
10402
10403                 for (i = 0; i < state->num_private_objs; i++) {
10404                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10405
10406                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10407                                 int j = state->num_private_objs-1;
10408
10409                                 dm_atomic_destroy_state(obj,
10410                                                 state->private_objs[i].state);
10411
10412                                 /* If i is not at the end of the array then the
10413                                  * last element needs to be moved to where i was
10414                                  * before the array can safely be truncated.
10415                                  */
10416                                 if (i != j)
10417                                         state->private_objs[i] =
10418                                                 state->private_objs[j];
10419
10420                                 state->private_objs[j].ptr = NULL;
10421                                 state->private_objs[j].state = NULL;
10422                                 state->private_objs[j].old_state = NULL;
10423                                 state->private_objs[j].new_state = NULL;
10424
10425                                 state->num_private_objs = j;
10426                                 break;
10427                         }
10428                 }
10429         }
10430
10431         /* Store the overall update type for use later in atomic check. */
10432         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10433                 struct dm_crtc_state *dm_new_crtc_state =
10434                         to_dm_crtc_state(new_crtc_state);
10435
10436                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10437                                                          UPDATE_TYPE_FULL :
10438                                                          UPDATE_TYPE_FAST;
10439         }
10440
10441         /* Must have succeeded at this point */
10442         WARN_ON(ret);
10443
10444         trace_amdgpu_dm_atomic_check_finish(state, ret);
10445
10446         return ret;
10447
10448 fail:
10449         if (ret == -EDEADLK)
10450                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10451         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10452                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10453         else
10454                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10455
10456         trace_amdgpu_dm_atomic_check_finish(state, ret);
10457
10458         return ret;
10459 }
10460
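/*
 * Reads the DP_DOWN_STREAM_PORT_COUNT DPCD register and tests the
 * MSA_TIMING_PAR_IGNORED bit: a sink that ignores the MSA timing parameters
 * can tolerate the varying vertical totals produced by variable refresh,
 * which is a prerequisite for adaptive sync over DP.
 */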
10461 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10462                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10463 {
10464         uint8_t dpcd_data;
10465         bool capable = false;
10466
10467         if (amdgpu_dm_connector->dc_link &&
10468                 dm_helpers_dp_read_dpcd(
10469                                 NULL,
10470                                 amdgpu_dm_connector->dc_link,
10471                                 DP_DOWN_STREAM_PORT_COUNT,
10472                                 &dpcd_data,
10473                                 sizeof(dpcd_data))) {
10474                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10475         }
10476
10477         return capable;
10478 }
10479
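/*
 * The CEA extension block is streamed to the DMCU firmware in 8-byte
 * chunks; the firmware acks each chunk and, on the final one, replies with
 * either an AMD VSDB parse result (version plus min/max refresh rate) or a
 * failure. A sketch of the exchange, assuming len is a multiple of 8:
 *
 *   dc_edid_parser_send_cea(dc, 0, len, &edid_ext[0], 8);          -> ack
 *   dc_edid_parser_send_cea(dc, 8, len, &edid_ext[8], 8);          -> ack
 *   ...
 *   dc_edid_parser_send_cea(dc, len - 8, len, &edid_ext[len - 8], 8); -> result
 */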
10480 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10481                 uint8_t *edid_ext, int len,
10482                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10483 {
10484         int i;
10485         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10486         struct dc *dc = adev->dm.dc;
10487
10488         /* send extension block to DMCU for parsing */
10489         for (i = 0; i < len; i += 8) {
10490                 bool res;
10491                 int offset;
10492
10493                 /* send 8 bytes at a time */
10494                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10495                         return false;
10496
10497                 if (i + 8 == len) {
10498                         /* entire EDID block sent, expect the parse result */
10499                         int version, min_rate, max_rate;
10500
10501                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10502                         if (res) {
10503                                 /* amd vsdb found */
10504                                 vsdb_info->freesync_supported = 1;
10505                                 vsdb_info->amd_vsdb_version = version;
10506                                 vsdb_info->min_refresh_rate_hz = min_rate;
10507                                 vsdb_info->max_refresh_rate_hz = max_rate;
10508                                 return true;
10509                         }
10510                         /* not amd vsdb */
10511                         return false;
10512                 }
10513
10514                 /* check for ack */
10515                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10516                 if (!res)
10517                         return false;
10518         }
10519
10520         return false;
10521 }
10522
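/*
 * Locate the CEA-861 extension block (open-coding drm_find_cea_extension(),
 * as the markers below note) and hand it to the firmware-based parser.
 * Returns the extension block index on success, -ENODEV otherwise.
 */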
10523 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10524                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10525 {
10526         uint8_t *edid_ext = NULL;
10527         int i;
10528         bool valid_vsdb_found = false;
10529
10530         /*----- drm_find_cea_extension() -----*/
10531         /* No EDID or EDID extensions */
10532         if (edid == NULL || edid->extensions == 0)
10533                 return -ENODEV;
10534
10535         /* Find CEA extension */
10536         for (i = 0; i < edid->extensions; i++) {
10537                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10538                 if (edid_ext[0] == CEA_EXT)
10539                         break;
10540         }
10541
10542         if (i == edid->extensions)
10543                 return -ENODEV;
10544
10545         /*----- cea_db_offsets() -----*/
10546         if (edid_ext[0] != CEA_EXT)
10547                 return -ENODEV;
10548
10549         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10550
10551         return valid_vsdb_found ? i : -ENODEV;
10552 }
10553
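/*
 * Derive FreeSync capability from the EDID. For DP/eDP sinks that can
 * ignore MSA timing, the refresh-rate range comes from an EDID monitor
 * range descriptor; for HDMI sinks it comes from the AMD VSDB parsed
 * above. A usable range (more than 10 Hz wide) marks the connector as
 * FreeSync capable, which is then published via the vrr_capable property.
 */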
10554 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10555                                         struct edid *edid)
10556 {
10557         int i = 0;
10558         struct detailed_timing *timing;
10559         struct detailed_non_pixel *data;
10560         struct detailed_data_monitor_range *range;
10561         struct amdgpu_dm_connector *amdgpu_dm_connector =
10562                         to_amdgpu_dm_connector(connector);
10563         struct dm_connector_state *dm_con_state = NULL;
10564
10565         struct drm_device *dev = connector->dev;
10566         struct amdgpu_device *adev = drm_to_adev(dev);
10567         bool freesync_capable = false;
10568         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10569
10570         if (!connector->state) {
10571                 DRM_ERROR("%s - Connector has no state", __func__);
10572                 goto update;
10573         }
10574
10575         if (!edid) {
10576                 dm_con_state = to_dm_connector_state(connector->state);
10577
10578                 amdgpu_dm_connector->min_vfreq = 0;
10579                 amdgpu_dm_connector->max_vfreq = 0;
10580                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10581
10582                 goto update;
10583         }
10584
10585         dm_con_state = to_dm_connector_state(connector->state);
10586
10587         if (!amdgpu_dm_connector->dc_sink) {
10588                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10589                 goto update;
10590         }
10591         if (!adev->dm.freesync_module)
10592                 goto update;
10593
10595         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10596                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10597                 bool edid_check_required = false;
10598
10599                 if (edid) {
10600                         edid_check_required = is_dp_capable_without_timing_msa(
10601                                                 adev->dm.dc,
10602                                                 amdgpu_dm_connector);
10603                 }
10604
10605                 if (edid_check_required && (edid->version > 1 ||
10606                    (edid->version == 1 && edid->revision > 1))) {
10607                         for (i = 0; i < 4; i++) {
10608
10609                                 timing  = &edid->detailed_timings[i];
10610                                 data    = &timing->data.other_data;
10611                                 range   = &data->data.range;
10612                                 /*
10613                                  * Check if monitor has continuous frequency mode
10614                                  */
10615                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10616                                         continue;
10617                                 /*
10618                                  * Check for flag range limits only. If flag == 1 then
10619                                  * no additional timing information provided.
10620                                  * Default GTF, GTF Secondary curve and CVT are not
10621                                  * supported
10622                                  */
10623                                 if (range->flags != 1)
10624                                         continue;
10625
10626                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10627                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10628                                 amdgpu_dm_connector->pixel_clock_mhz =
10629                                         range->pixel_clock_mhz * 10;
10630
10631                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10632                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10633
10634                                 break;
10635                         }
10636
10637                         if (amdgpu_dm_connector->max_vfreq -
10638                             amdgpu_dm_connector->min_vfreq > 10) {
10639
10640                                 freesync_capable = true;
10641                         }
10642                 }
10643         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10644                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10645                 if (i >= 0 && vsdb_info.freesync_supported) {
10646                         timing  = &edid->detailed_timings[i];
10647                         data    = &timing->data.other_data;
10648
10649                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10650                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10651                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10652                                 freesync_capable = true;
10653
10654                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10655                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10656                 }
10657         }
10658
10659 update:
10660         if (dm_con_state)
10661                 dm_con_state->freesync_capable = freesync_capable;
10662
10663         if (connector->vrr_capable_property)
10664                 drm_connector_set_vrr_capable_property(connector,
10665                                                        freesync_capable);
10666 }
10667
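/*
 * Cache the sink's PSR capability from DPCD. DP_PSR_SUPPORT reports the
 * supported PSR version: zero means PSR is unsupported, and any non-zero
 * value is programmed as DC_PSR_VERSION_1, the only version this code
 * enables.
 */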
10668 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10669 {
10670         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10671
10672         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10673                 return;
10674         if (link->type == dc_connection_none)
10675                 return;
10676         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10677                                         dpcd_data, sizeof(dpcd_data))) {
10678                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10679
10680                 if (dpcd_data[0] == 0) {
10681                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10682                         link->psr_settings.psr_feature_enabled = false;
10683                 } else {
10684                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10685                         link->psr_settings.psr_feature_enabled = true;
10686                 }
10687
10688                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10689         }
10690 }
10691
10692 /*
10693  * amdgpu_dm_link_setup_psr() - configure psr link
10694  * @stream: stream state
10695  *
10696  * Return: true on success
10697  */
10698 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10699 {
10700         struct dc_link *link = NULL;
10701         struct psr_config psr_config = {0};
10702         struct psr_context psr_context = {0};
10703         bool ret = false;
10704
10705         if (stream == NULL)
10706                 return false;
10707
10708         link = stream->link;
10709
10710         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10711
10712         if (psr_config.psr_version > 0) {
10713                 psr_config.psr_exit_link_training_required = 0x1;
10714                 psr_config.psr_frame_capture_indication_req = 0;
10715                 psr_config.psr_rfb_setup_time = 0x37;
10716                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10717                 psr_config.allow_smu_optimizations = 0x0;
10718
10719                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10720
10721         }
10722         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
10723
10724         return ret;
10725 }
10726
10727 /*
10728  * amdgpu_dm_psr_enable() - enable psr f/w
10729  * @stream: stream state
10730  *
10731  * Return: true on success
10732  */
10733 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10734 {
10735         struct dc_link *link = stream->link;
10736         unsigned int vsync_rate_hz = 0;
10737         struct dc_static_screen_params params = {0};
10738         /* Calculate the number of static frames before generating an
10739          * interrupt to enter PSR; start from a fail-safe default of
10740          * two static frames.
10741          */
10742         unsigned int num_frames_static = 2;
10743
10744         DRM_DEBUG_DRIVER("Enabling psr...\n");
10745
10746         vsync_rate_hz = div64_u64(div64_u64((
10747                         stream->timing.pix_clk_100hz * 100),
10748                         stream->timing.v_total),
10749                         stream->timing.h_total);
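        /*
         * Illustrative example: a CEA 1920x1080@60 timing has
         * pix_clk_100hz = 1485000 with v_total = 1125 and h_total = 2200,
         * giving vsync_rate_hz = 148500000 / 1125 / 2200 = 60.
         */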
10750
10751         /*
10752          * Round up: calculate the number of frames such that at least
10753          * 30 ms of time has passed.
10754          */
10755         if (vsync_rate_hz != 0) {
10756                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10757                 num_frames_static = (30000 / frame_time_microsec) + 1;
10758         }
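        /*
         * Worked example: at 60 Hz, frame_time_microsec = 16666, so
         * num_frames_static = 30000 / 16666 + 1 = 2; two static frames
         * (~33 ms) satisfy the 30 ms requirement.
         */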
10759
10760         params.triggers.cursor_update = true;
10761         params.triggers.overlay_update = true;
10762         params.triggers.surface_update = true;
10763         params.num_frames = num_frames_static;
10764
10765         dc_stream_set_static_screen_params(link->ctx->dc,
10766                                            &stream, 1,
10767                                            &params);
10768
10769         return dc_link_set_psr_allow_active(link, true, false, false);
10770 }
10771
10772 /*
10773  * amdgpu_dm_psr_disable() - disable psr f/w
10774  * @stream: stream state
10775  *
10776  * Return: true on success
10777  */
10778 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10779 {
10780         DRM_DEBUG_DRIVER("Disabling psr...\n");
10782
10783         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10784 }
10785
10786 /*
10787  * amdgpu_dm_psr_disable_all() - disable psr f/w
10788  * if psr is enabled on any stream
10789  *
10790  * Return: true on success
10791  */
10792 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10793 {
10794         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10795         return dc_set_psr_allow_active(dm->dc, false);
10796 }
10797
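/*
 * Propagate adev->dm.force_timing_sync (a debug control) to every stream
 * in the current DC state and retrigger CRTC synchronization, holding
 * dc_lock so the state cannot change underneath.
 */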
10798 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10799 {
10800         struct amdgpu_device *adev = drm_to_adev(dev);
10801         struct dc *dc = adev->dm.dc;
10802         int i;
10803
10804         mutex_lock(&adev->dm.dc_lock);
10805         if (dc->current_state) {
10806                 for (i = 0; i < dc->current_state->stream_count; ++i)
10807                         dc->current_state->streams[i]
10808                                 ->triggered_crtc_reset.enabled =
10809                                 adev->dm.force_timing_sync;
10810
10811                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10812                 dc_trigger_sync(dc, dc->current_state);
10813         }
10814         mutex_unlock(&adev->dm.dc_lock);
10815 }
10816
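/*
 * Register accessors used by DC. Both route through CGS and emit the
 * amdgpu_dc_wreg/amdgpu_dc_rreg trace events, so register traffic can be
 * inspected via tracing; DM_CHECK_ADDR_0 optionally guards against
 * accesses to register offset 0.
 */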
10817 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10818                        uint32_t value, const char *func_name)
10819 {
10820 #ifdef DM_CHECK_ADDR_0
10821         if (address == 0) {
10822                 DC_ERR("invalid register write. address = 0");
10823                 return;
10824         }
10825 #endif
10826         cgs_write_register(ctx->cgs_device, address, value);
10827         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10828 }
10829
10830 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10831                           const char *func_name)
10832 {
10833         uint32_t value;
10834 #ifdef DM_CHECK_ADDR_0
10835         if (address == 0) {
10836                 DC_ERR("invalid register read; address = 0\n");
10837                 return 0;
10838         }
10839 #endif
10840
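        /*
         * Reads are not expected while the DMUB register helper is
         * gathering writes for offload (unless it is burst-writing);
         * presumably such a read could observe stale data ahead of the
         * queued writes, so it is flagged as a programming error.
         */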
10841         if (ctx->dmub_srv &&
10842             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10843             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10844                 ASSERT(false);
10845                 return 0;
10846         }
10847
10848         value = cgs_read_register(ctx->cgs_device, address);
10849
10850         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10851
10852         return value;
10853 }
10854
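/*
 * Synchronous wrapper around the DMUB AUX path: issue the asynchronous
 * request, then wait (interruptibly, up to 10 seconds) on
 * dmub_aux_transfer_done, which is completed when the DMUB notification
 * carrying the result arrives. On timeout, *operation_result is set and
 * -1 is returned; otherwise the AUX reply length is returned.
 */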
10855 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10856                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10857 {
10858         struct amdgpu_device *adev = ctx->driver_context;
10859         int ret = 0;
10860
10861         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10862         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10863         if (ret == 0) {
10864                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10865                 return -1;
10866         }
10867         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10868
10869         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10870                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10871
10872                 /* For the read case, copy data to payload */
10873                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10874                 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10875                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10876                         adev->dm.dmub_notify->aux_reply.length);
10877         }
10878
10879         return adev->dm.dmub_notify->aux_reply.length;
10880 }