1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63
64 #include "ivsrcid/ivsrcid_vislands30.h"
65
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92
93 #include "soc15_common.h"
94 #endif
95
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116
117 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146         switch (link->dpcd_caps.dongle_type) {
147         case DISPLAY_DONGLE_NONE:
148                 return DRM_MODE_SUBCONNECTOR_Native;
149         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150                 return DRM_MODE_SUBCONNECTOR_VGA;
151         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152         case DISPLAY_DONGLE_DP_DVI_DONGLE:
153                 return DRM_MODE_SUBCONNECTOR_DVID;
154         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156                 return DRM_MODE_SUBCONNECTOR_HDMIA;
157         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158         default:
159                 return DRM_MODE_SUBCONNECTOR_Unknown;
160         }
161 }
162
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165         struct dc_link *link = aconnector->dc_link;
166         struct drm_connector *connector = &aconnector->base;
167         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168
169         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170                 return;
171
172         if (aconnector->dc_sink)
173                 subconnector = get_subconnector_type(link);
174
175         drm_object_property_set_value(&connector->base,
176                         connector->dev->mode_config.dp_subconnector_property,
177                         subconnector);
178 }
179
180 /*
181  * Initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
183  * drm_encoder and drm_mode_config.
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192                                 struct drm_plane *plane,
193                                 unsigned long possible_crtcs,
194                                 const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196                                struct drm_plane *plane,
197                                uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
200                                     uint32_t link_index,
201                                     struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203                                   struct amdgpu_encoder *aencoder,
204                                   uint32_t link_index);
205
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211                                   struct drm_atomic_state *state);
212
213 static void handle_cursor_update(struct drm_plane *plane,
214                                  struct drm_plane_state *old_plane_state);
215
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223                                  struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int crtc - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239         if (crtc >= adev->mode_info.num_crtc)
240                 return 0;
241         else {
242                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243
244                 if (acrtc->dm_irq_params.stream == NULL) {
245                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246                                   crtc);
247                         return 0;
248                 }
249
250                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251         }
252 }
253
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255                                   u32 *vbl, u32 *position)
256 {
257         uint32_t v_blank_start, v_blank_end, h_position, v_position;
258
259         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260                 return -EINVAL;
261         else {
262                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263
264                 if (acrtc->dm_irq_params.stream == NULL) {
265                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266                                   crtc);
267                         return 0;
268                 }
269
270                 /*
271                  * TODO rework base driver to use values directly.
272                  * for now parse it back into reg-format
273                  */
274                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275                                          &v_blank_start,
276                                          &v_blank_end,
277                                          &h_position,
278                                          &v_position);
279
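                /*
                 * "reg-format" per the TODO above: v_position and
                 * v_blank_start go in the low 16 bits, h_position and
                 * v_blank_end in the high 16 bits of the two packed
                 * out-parameters below.
                 */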
280                 *position = v_position | (h_position << 16);
281                 *vbl = v_blank_start | (v_blank_end << 16);
282         }
283
284         return 0;
285 }
286
287 static bool dm_is_idle(void *handle)
288 {
289         /* XXX todo */
290         return true;
291 }
292
293 static int dm_wait_for_idle(void *handle)
294 {
295         /* XXX todo */
296         return 0;
297 }
298
299 static bool dm_check_soft_reset(void *handle)
300 {
301         return false;
302 }
303
304 static int dm_soft_reset(void *handle)
305 {
306         /* XXX todo */
307         return 0;
308 }
309
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312                      int otg_inst)
313 {
314         struct drm_device *dev = adev_to_drm(adev);
315         struct drm_crtc *crtc;
316         struct amdgpu_crtc *amdgpu_crtc;
317
318         if (WARN_ON(otg_inst == -1))
319                 return adev->mode_info.crtcs[0];
320
321         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322                 amdgpu_crtc = to_amdgpu_crtc(crtc);
323
324                 if (amdgpu_crtc->otg_inst == otg_inst)
325                         return amdgpu_crtc;
326         }
327
328         return NULL;
329 }
330
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333         return acrtc->dm_irq_params.freesync_config.state ==
334                        VRR_STATE_ACTIVE_VARIABLE ||
335                acrtc->dm_irq_params.freesync_config.state ==
336                        VRR_STATE_ACTIVE_FIXED;
337 }
338
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344
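/*
 * Timing adjust (vmin/vmax reprogramming) is needed whenever fixed-rate
 * VRR is requested, or whenever the VRR active state toggles between the
 * old and new CRTC state, since both change the adjust range later handed
 * to dc_stream_adjust_vmin_vmax().
 */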
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346                                               struct dm_crtc_state *new_state)
347 {
348         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349                 return true;
350         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351                 return true;
352         else
353                 return false;
354 }
355
356 /**
357  * dm_pflip_high_irq() - Handle pageflip interrupt
358  * @interrupt_params: used to derive the affected amdgpu device and CRTC
359  *
360  * Handles the pageflip interrupt by notifying all interested parties
361  * that the pageflip has been completed.
362  */
363 static void dm_pflip_high_irq(void *interrupt_params)
364 {
365         struct amdgpu_crtc *amdgpu_crtc;
366         struct common_irq_params *irq_params = interrupt_params;
367         struct amdgpu_device *adev = irq_params->adev;
368         unsigned long flags;
369         struct drm_pending_vblank_event *e;
370         uint32_t vpos, hpos, v_blank_start, v_blank_end;
371         bool vrr_active;
372
373         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374
375         /* IRQ could occur when in initial stage */
376         /* TODO work and BO cleanup */
377         if (amdgpu_crtc == NULL) {
378                 DC_LOG_PFLIP("CRTC is null, returning.\n");
379                 return;
380         }
381
382         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383
384         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
385                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
386                                                  amdgpu_crtc->pflip_status,
387                                                  AMDGPU_FLIP_SUBMITTED,
388                                                  amdgpu_crtc->crtc_id,
389                                                  amdgpu_crtc);
390                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391                 return;
392         }
393
394         /* page flip completed. */
395         e = amdgpu_crtc->event;
396         amdgpu_crtc->event = NULL;
397
398         WARN_ON(!e);
399
400         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401
402         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
403         if (!vrr_active ||
404             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405                                       &v_blank_end, &hpos, &vpos) ||
406             (vpos < v_blank_start)) {
407                 /* Update to correct count and vblank timestamp if racing with
408                  * vblank irq. This also updates to the correct vblank timestamp
409                  * even in VRR mode, as scanout is past the front-porch atm.
410                  */
411                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412
413                 /* Wake up userspace by sending the pageflip event with proper
414                  * count and timestamp of vblank of flip completion.
415                  */
416                 if (e) {
417                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418
419                         /* Event sent, so done with vblank for this flip */
420                         drm_crtc_vblank_put(&amdgpu_crtc->base);
421                 }
422         } else if (e) {
423                 /* VRR active and inside front-porch: vblank count and
424                  * timestamp for pageflip event will only be up to date after
425                  * drm_crtc_handle_vblank() has been executed from late vblank
426                  * irq handler after start of back-porch (vline 0). We queue the
427                  * pageflip event for send-out by drm_crtc_handle_vblank() with
428                  * updated timestamp and count, once it runs after us.
429                  *
430                  * We need to open-code this instead of using the helper
431                  * drm_crtc_arm_vblank_event(), as that helper would
432                  * call drm_crtc_accurate_vblank_count(), which we must
433                  * not call in VRR mode while we are in front-porch!
434                  */
435
436                 /* sequence will be replaced by real count during send-out. */
437                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438                 e->pipe = amdgpu_crtc->crtc_id;
439
440                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441                 e = NULL;
442         }
443
444         /* Keep track of vblank of this flip for flip throttling. We use the
445          * cooked hw counter, as that one incremented at start of this vblank
446          * of pageflip completion, so last_flip_vblank is the forbidden count
447          * for queueing new pageflips if vsync + VRR is enabled.
448          */
449         amdgpu_crtc->dm_irq_params.last_flip_vblank =
450                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451
452         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454
455         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456                      amdgpu_crtc->crtc_id, amdgpu_crtc,
457                      vrr_active, (int) !e);
458 }
459
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462         struct common_irq_params *irq_params = interrupt_params;
463         struct amdgpu_device *adev = irq_params->adev;
464         struct amdgpu_crtc *acrtc;
465         struct drm_device *drm_dev;
466         struct drm_vblank_crtc *vblank;
467         ktime_t frame_duration_ns, previous_timestamp;
468         unsigned long flags;
469         int vrr_active;
470
471         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472
473         if (acrtc) {
474                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475                 drm_dev = acrtc->base.dev;
476                 vblank = &drm_dev->vblank[acrtc->base.index];
477                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478                 frame_duration_ns = vblank->time - previous_timestamp;
479
480                 if (frame_duration_ns > 0) {
481                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
482                                                 frame_duration_ns,
483                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
485                 }
486
487                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488                               acrtc->crtc_id,
489                               vrr_active);
490
491                 /* Core vblank handling is done here after end of front-porch in
492                  * vrr mode, as vblank timestamping will give valid results
493                  * while now done after front-porch. This will also deliver
494                  * page-flip completion events that have been queued to us
495                  * if a pageflip happened inside front-porch.
496                  */
497                 if (vrr_active) {
498                         drm_crtc_handle_vblank(&acrtc->base);
499
500                         /* BTR processing for pre-DCE12 ASICs */
501                         if (acrtc->dm_irq_params.stream &&
502                             adev->family < AMDGPU_FAMILY_AI) {
503                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504                                 mod_freesync_handle_v_update(
505                                     adev->dm.freesync_module,
506                                     acrtc->dm_irq_params.stream,
507                                     &acrtc->dm_irq_params.vrr_params);
508
509                                 dc_stream_adjust_vmin_vmax(
510                                     adev->dm.dc,
511                                     acrtc->dm_irq_params.stream,
512                                     &acrtc->dm_irq_params.vrr_params.adjust);
513                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514                         }
515                 }
516         }
517 }
518
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528         struct common_irq_params *irq_params = interrupt_params;
529         struct amdgpu_device *adev = irq_params->adev;
530         struct amdgpu_crtc *acrtc;
531         unsigned long flags;
532         int vrr_active;
533
534         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535         if (!acrtc)
536                 return;
537
538         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539
540         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541                       vrr_active, acrtc->dm_irq_params.active_planes);
542
543         /*
544          * Core vblank handling at start of front-porch is only possible
545          * in non-vrr mode, as only there vblank timestamping will give
546          * valid results while done in front-porch. Otherwise defer it
547          * to dm_vupdate_high_irq after end of front-porch.
548          */
549         if (!vrr_active)
550                 drm_crtc_handle_vblank(&acrtc->base);
551
552         /*
553          * Following stuff must happen at start of vblank, for crc
554          * computation and below-the-range btr support in vrr mode.
555          */
556         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557
558         /* BTR updates need to happen before VUPDATE on Vega and above. */
559         if (adev->family < AMDGPU_FAMILY_AI)
560                 return;
561
562         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563
564         if (acrtc->dm_irq_params.stream &&
565             acrtc->dm_irq_params.vrr_params.supported &&
566             acrtc->dm_irq_params.freesync_config.state ==
567                     VRR_STATE_ACTIVE_VARIABLE) {
568                 mod_freesync_handle_v_update(adev->dm.freesync_module,
569                                              acrtc->dm_irq_params.stream,
570                                              &acrtc->dm_irq_params.vrr_params);
571
572                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573                                            &acrtc->dm_irq_params.vrr_params.adjust);
574         }
575
576         /*
577          * If there aren't any active_planes then DCH HUBP may be clock-gated.
578          * In that case, pageflip completion interrupts won't fire and pageflip
579          * completion events won't get delivered. Prevent this by sending
580          * pending pageflip events from here if a flip is still pending.
581          *
582          * If any planes are enabled, use dm_pflip_high_irq() instead, to
583          * avoid race conditions between flip programming and completion,
584          * which could cause too early flip completion events.
585          */
586         if (adev->family >= AMDGPU_FAMILY_RV &&
587             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588             acrtc->dm_irq_params.active_planes == 0) {
589                 if (acrtc->event) {
590                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591                         acrtc->event = NULL;
592                         drm_crtc_vblank_put(&acrtc->base);
593                 }
594                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
595         }
596
597         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
602 /**
603  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604  * DCN generation ASICs
605  * @interrupt_params: interrupt parameters
606  *
607  * Used to set crc window/read out crc value at vertical line 0 position
608  */
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611         struct common_irq_params *irq_params = interrupt_params;
612         struct amdgpu_device *adev = irq_params->adev;
613         struct amdgpu_crtc *acrtc;
614
615         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616
617         if (!acrtc)
618                 return;
619
620         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623
624 /**
625  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
626  * @adev: amdgpu_device pointer
627  * @notify: dmub notification structure
628  *
629  * DMUB AUX or SET_CONFIG command completion processing callback.
630  * Copies the dmub notification to DM, which is to be read by the AUX command
631  * issuing thread, and also signals the event to wake up that thread.
632  */
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
634 {
635         if (adev->dm.dmub_notify)
636                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638                 complete(&adev->dm.dmub_aux_transfer_done);
639 }
640
641 /**
642  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643  * @adev: amdgpu_device pointer
644  * @notify: dmub notification structure
645  *
646  * DMUB HPD interrupt processing callback. Gets the display index through the
647  * link index and calls the helper to do the processing.
648  */
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
650 {
651         struct amdgpu_dm_connector *aconnector;
652         struct amdgpu_dm_connector *hpd_aconnector = NULL;
653         struct drm_connector *connector;
654         struct drm_connector_list_iter iter;
655         struct dc_link *link;
656         uint8_t link_index = 0;
657         struct drm_device *dev;
658
659         if (adev == NULL)
660                 return;
661
662         if (notify == NULL) {
663                 DRM_ERROR("DMUB HPD callback notification was NULL");
664                 return;
665         }
666
667         if (notify->link_index >= adev->dm.dc->link_count) {
668                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
669                 return;
670         }
671
        dev = adev->dm.ddev;
672         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
673
674         link_index = notify->link_index;
675
676         link = adev->dm.dc->links[link_index];
677
678         drm_connector_list_iter_begin(dev, &iter);
679         drm_for_each_connector_iter(connector, &iter) {
680                 aconnector = to_amdgpu_dm_connector(connector);
681                 if (link && aconnector->dc_link == link) {
682                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
683                         hpd_aconnector = aconnector;
684                         break;
685                 }
686         }
687         drm_connector_list_iter_end(&iter);
688         drm_modeset_unlock(&dev->mode_config.connection_mutex);
689
690         if (hpd_aconnector)
691                 handle_hpd_irq_helper(hpd_aconnector);
692 }
693
694 /**
695  * register_dmub_notify_callback - Sets callback for DMUB notify
696  * @adev: amdgpu_device pointer
697  * @type: Type of dmub notification
698  * @callback: Dmub interrupt callback function
699  * @dmub_int_thread_offload: offload indicator
700  *
701  * API to register a dmub callback handler for a dmub notification.
702  * Also sets an indicator of whether the callback processing is to be
703  * offloaded to the dmub interrupt handling thread.
704  * Return: true if successfully registered, false on an invalid type or callback
705  */
706 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
707                                    dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
708 {
709         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
710                 adev->dm.dmub_callback[type] = callback;
711                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
712         } else
713                 return false;
714
715         return true;
716 }
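/*
 * Illustrative sketch (not part of this file's init path as shown here):
 * DM init code would wire up the handlers defined above roughly as
 *
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *					   dmub_aux_setconfig_callback, false))
 *		DRM_ERROR("amdgpu: fail to register dmub aux callback");
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *					   dmub_hpd_callback, true))
 *		DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 *
 * i.e. AUX replies are handled in-line from the outbox interrupt, while
 * HPD processing is offloaded to the dmub interrupt worker (the 'true'
 * offload flag).
 */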
717
718 static void dm_handle_hpd_work(struct work_struct *work)
719 {
720         struct dmub_hpd_work *dmub_hpd_wrk;
721
722         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
723
724         if (!dmub_hpd_wrk->dmub_notify) {
725                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                kfree(dmub_hpd_wrk);
726                 return;
727         }
728
729         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
730                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
731                 dmub_hpd_wrk->dmub_notify);
732         }
        kfree(dmub_hpd_wrk->dmub_notify);
733         kfree(dmub_hpd_wrk);
734
735 }
736
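/* Upper bound on outbox0 trace buffer entries drained per interrupt. */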
737 #define DMUB_TRACE_MAX_READ 64
738 /**
739  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740  * @interrupt_params: used for determining the Outbox instance
741  *
742  * Handles the Outbox Interrupt
743  * event handler.
744  */
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
746 {
747         struct dmub_notification notify;
748         struct common_irq_params *irq_params = interrupt_params;
749         struct amdgpu_device *adev = irq_params->adev;
750         struct amdgpu_display_manager *dm = &adev->dm;
751         struct dmcub_trace_buf_entry entry = { 0 };
752         uint32_t count = 0;
753         struct dmub_hpd_work *dmub_hpd_wrk;
754         struct dc_link *plink = NULL;
755
756         if (dc_enable_dmub_notifications(adev->dm.dc) &&
757                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
759                 if (!dmub_hpd_wrk) {
760                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
761                         return;
762                 }
                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                if (!dmub_hpd_wrk->dmub_notify) {
                        kfree(dmub_hpd_wrk);
                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                        return;
                }
763                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
764
765                 do {
766                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
767                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
768                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
769                                 continue;
770                         }
771                         if (dm->dmub_thread_offload[notify.type] == true) {
772                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
773                                 dmub_hpd_wrk->adev = adev;
774                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
775                                         plink = adev->dm.dc->links[notify.link_index];
776                                         if (plink) {
777                                                 plink->hpd_status =
778                                                         notify.hpd_status == DP_HPD_PLUG;
780                                         }
781                                 }
782                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
783                         } else {
784                                 dm->dmub_callback[notify.type](adev, &notify);
785                         }
786                 } while (notify.pending_notification);
787         }
788
789
790         do {
791                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
792                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
793                                                         entry.param0, entry.param1);
794
795                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
796                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
797                 } else
798                         break;
799
800                 count++;
801
802         } while (count <= DMUB_TRACE_MAX_READ);
803
804         if (count > DMUB_TRACE_MAX_READ)
805                 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
806 }
807 #endif
808
809 static int dm_set_clockgating_state(void *handle,
810                   enum amd_clockgating_state state)
811 {
812         return 0;
813 }
814
815 static int dm_set_powergating_state(void *handle,
816                   enum amd_powergating_state state)
817 {
818         return 0;
819 }
820
821 /* Prototypes of private functions */
822 static int dm_early_init(void *handle);
823
824 /* Allocate memory for FBC compressed data  */
825 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
826 {
827         struct drm_device *dev = connector->dev;
828         struct amdgpu_device *adev = drm_to_adev(dev);
829         struct dm_compressor_info *compressor = &adev->dm.compressor;
830         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
831         struct drm_display_mode *mode;
832         unsigned long max_size = 0;
833
834         if (adev->dm.dc->fbc_compressor == NULL)
835                 return;
836
837         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
838                 return;
839
840         if (compressor->bo_ptr)
841                 return;
842
843
844         list_for_each_entry(mode, &connector->modes, head) {
845                 if (max_size < mode->htotal * mode->vtotal)
846                         max_size = mode->htotal * mode->vtotal;
847         }
848
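        /* Worst-case size: the largest listed mode at 4 bytes per pixel. */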
849         if (max_size) {
850                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
851                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
852                             &compressor->gpu_addr, &compressor->cpu_addr);
853
854                 if (r)
855                         DRM_ERROR("DM: Failed to initialize FBC\n");
856                 else {
857                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
858                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
859                 }
860
861         }
862
863 }
864
865 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
866                                           int pipe, bool *enabled,
867                                           unsigned char *buf, int max_bytes)
868 {
869         struct drm_device *dev = dev_get_drvdata(kdev);
870         struct amdgpu_device *adev = drm_to_adev(dev);
871         struct drm_connector *connector;
872         struct drm_connector_list_iter conn_iter;
873         struct amdgpu_dm_connector *aconnector;
874         int ret = 0;
875
876         *enabled = false;
877
878         mutex_lock(&adev->dm.audio_lock);
879
880         drm_connector_list_iter_begin(dev, &conn_iter);
881         drm_for_each_connector_iter(connector, &conn_iter) {
882                 aconnector = to_amdgpu_dm_connector(connector);
883                 if (aconnector->audio_inst != port)
884                         continue;
885
886                 *enabled = true;
887                 ret = drm_eld_size(connector->eld);
888                 memcpy(buf, connector->eld, min(max_bytes, ret));
889
890                 break;
891         }
892         drm_connector_list_iter_end(&conn_iter);
893
894         mutex_unlock(&adev->dm.audio_lock);
895
896         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
897
898         return ret;
899 }
900
901 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
902         .get_eld = amdgpu_dm_audio_component_get_eld,
903 };
904
905 static int amdgpu_dm_audio_component_bind(struct device *kdev,
906                                        struct device *hda_kdev, void *data)
907 {
908         struct drm_device *dev = dev_get_drvdata(kdev);
909         struct amdgpu_device *adev = drm_to_adev(dev);
910         struct drm_audio_component *acomp = data;
911
912         acomp->ops = &amdgpu_dm_audio_component_ops;
913         acomp->dev = kdev;
914         adev->dm.audio_component = acomp;
915
916         return 0;
917 }
918
919 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
920                                           struct device *hda_kdev, void *data)
921 {
922         struct drm_device *dev = dev_get_drvdata(kdev);
923         struct amdgpu_device *adev = drm_to_adev(dev);
924         struct drm_audio_component *acomp = data;
925
926         acomp->ops = NULL;
927         acomp->dev = NULL;
928         adev->dm.audio_component = NULL;
929 }
930
931 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
932         .bind   = amdgpu_dm_audio_component_bind,
933         .unbind = amdgpu_dm_audio_component_unbind,
934 };
935
936 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
937 {
938         int i, ret;
939
940         if (!amdgpu_audio)
941                 return 0;
942
943         adev->mode_info.audio.enabled = true;
944
945         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
946
947         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
948                 adev->mode_info.audio.pin[i].channels = -1;
949                 adev->mode_info.audio.pin[i].rate = -1;
950                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
951                 adev->mode_info.audio.pin[i].status_bits = 0;
952                 adev->mode_info.audio.pin[i].category_code = 0;
953                 adev->mode_info.audio.pin[i].connected = false;
954                 adev->mode_info.audio.pin[i].id =
955                         adev->dm.dc->res_pool->audios[i]->inst;
956                 adev->mode_info.audio.pin[i].offset = 0;
957         }
958
959         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
960         if (ret < 0)
961                 return ret;
962
963         adev->dm.audio_registered = true;
964
965         return 0;
966 }
967
968 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
969 {
970         if (!amdgpu_audio)
971                 return;
972
973         if (!adev->mode_info.audio.enabled)
974                 return;
975
976         if (adev->dm.audio_registered) {
977                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
978                 adev->dm.audio_registered = false;
979         }
980
981         /* TODO: Disable audio? */
982
983         adev->mode_info.audio.enabled = false;
984 }
985
986 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
987 {
988         struct drm_audio_component *acomp = adev->dm.audio_component;
989
990         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
991                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
992
993                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
994                                                  pin, -1);
995         }
996 }
997
998 static int dm_dmub_hw_init(struct amdgpu_device *adev)
999 {
1000         const struct dmcub_firmware_header_v1_0 *hdr;
1001         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1002         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1003         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1004         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1005         struct abm *abm = adev->dm.dc->res_pool->abm;
1006         struct dmub_srv_hw_params hw_params;
1007         enum dmub_status status;
1008         const unsigned char *fw_inst_const, *fw_bss_data;
1009         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1010         bool has_hw_support;
1011
1012         if (!dmub_srv)
1013                 /* DMUB isn't supported on the ASIC. */
1014                 return 0;
1015
1016         if (!fb_info) {
1017                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1018                 return -EINVAL;
1019         }
1020
1021         if (!dmub_fw) {
1022                 /* Firmware required for DMUB support. */
1023                 DRM_ERROR("No firmware provided for DMUB.\n");
1024                 return -EINVAL;
1025         }
1026
1027         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1028         if (status != DMUB_STATUS_OK) {
1029                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1030                 return -EINVAL;
1031         }
1032
1033         if (!has_hw_support) {
1034                 DRM_INFO("DMUB unsupported on ASIC\n");
1035                 return 0;
1036         }
1037
1038         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1039
1040         fw_inst_const = dmub_fw->data +
1041                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1042                         PSP_HEADER_BYTES;
1043
1044         fw_bss_data = dmub_fw->data +
1045                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1046                       le32_to_cpu(hdr->inst_const_bytes);
1047
1048         /* Copy firmware and bios info into FB memory. */
1049         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1050                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1051
1052         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
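        /*
         * Firmware image layout implied by the offsets above:
         *
         *   ucode_array_offset_bytes -> +---------------------+
         *                               | PSP header (0x100)  |
         *                               | inst const payload  | -> window 0
         *                               | PSP footer (0x100)  |
         *   + inst_const_bytes       -> +---------------------+
         *                               | bss/data            | -> window 2
         *                               +---------------------+
         */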
1053
1054         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1055          * amdgpu_ucode_init_single_fw will load dmub firmware
1056          * fw_inst_const part to cw0; otherwise, the firmware back door load
1057          * will be done by dm_dmub_hw_init
1058          */
1059         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1060                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1061                                 fw_inst_const_size);
1062         }
1063
1064         if (fw_bss_data_size)
1065                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1066                        fw_bss_data, fw_bss_data_size);
1067
1068         /* Copy firmware bios info into FB memory. */
1069         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1070                adev->bios_size);
1071
1072         /* Reset regions that need to be reset. */
1073         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1074                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1075
1076         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1077                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1078
1079         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1080                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1081
1082         /* Initialize hardware. */
1083         memset(&hw_params, 0, sizeof(hw_params));
1084         hw_params.fb_base = adev->gmc.fb_start;
1085         hw_params.fb_offset = adev->gmc.aper_base;
1086
1087         /* backdoor load firmware and trigger dmub running */
1088         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1089                 hw_params.load_inst_const = true;
1090
1091         if (dmcu)
1092                 hw_params.psp_version = dmcu->psp_version;
1093
1094         for (i = 0; i < fb_info->num_fb; ++i)
1095                 hw_params.fb[i] = &fb_info->fb[i];
1096
1097         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1098         if (status != DMUB_STATUS_OK) {
1099                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1100                 return -EINVAL;
1101         }
1102
1103         /* Wait for firmware load to finish. */
1104         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1105         if (status != DMUB_STATUS_OK)
1106                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1107
1108         /* Init DMCU and ABM if available. */
1109         if (dmcu && abm) {
1110                 dmcu->funcs->dmcu_init(dmcu);
1111                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1112         }
1113
1114         if (!adev->dm.dc->ctx->dmub_srv)
1115                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1116         if (!adev->dm.dc->ctx->dmub_srv) {
1117                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1118                 return -ENOMEM;
1119         }
1120
1121         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1122                  adev->dm.dmcub_fw_version);
1123
1124         return 0;
1125 }
1126
1127 #if defined(CONFIG_DRM_AMD_DC_DCN)
1128 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1129 {
1130         uint64_t pt_base;
1131         uint32_t logical_addr_low;
1132         uint32_t logical_addr_high;
1133         uint32_t agp_base, agp_bot, agp_top;
1134         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1135
1136         memset(pa_config, 0, sizeof(*pa_config));
1137
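        /*
         * The shifts below keep intermediate values in coarser units:
         * 256KB units (>> 18) for the system aperture bounds, 16MB units
         * (>> 24) for the AGP window and 4KB pages (>> 12) for the GART
         * page table, before being converted back to byte addresses when
         * pa_config is filled in.
         */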
1138         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1139         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1140
1141         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1142                 /*
1143                  * Raven2 has a HW issue that it is unable to use the vram which
1144                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1145                  * workaround that increases the system aperture high address (add 1)
1146                  * to get rid of the VM fault and hardware hang.
1147                  */
1148                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1149         else
1150                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1151
1152         agp_base = 0;
1153         agp_bot = adev->gmc.agp_start >> 24;
1154         agp_top = adev->gmc.agp_end >> 24;
1155
1156
1157         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1158         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1159         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1160         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1161         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1162         page_table_base.low_part = lower_32_bits(pt_base);
1163
1164         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1165         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1166
1167         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1168         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1169         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1170
1171         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1172         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1173         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1174
1175         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1176         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1177         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1178
1179         pa_config->is_hvm_enabled = 0;
1180
1181 }
1182 #endif
1183 #if defined(CONFIG_DRM_AMD_DC_DCN)
1184 static void vblank_control_worker(struct work_struct *work)
1185 {
1186         struct vblank_control_work *vblank_work =
1187                 container_of(work, struct vblank_control_work, work);
1188         struct amdgpu_display_manager *dm = vblank_work->dm;
1189
1190         mutex_lock(&dm->dc_lock);
1191
1192         if (vblank_work->enable)
1193                 dm->active_vblank_irq_count++;
1194         else if (dm->active_vblank_irq_count)
1195                 dm->active_vblank_irq_count--;
1196
1197         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1198
1199         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1200
1201         /* Control PSR based on vblank requirements from OS */
1202         if (vblank_work->stream && vblank_work->stream->link) {
1203                 if (vblank_work->enable) {
1204                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1205                                 amdgpu_dm_psr_disable(vblank_work->stream);
1206                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1207                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1208                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1209                         amdgpu_dm_psr_enable(vblank_work->stream);
1210                 }
1211         }
1212
1213         mutex_unlock(&dm->dc_lock);
1214
1215         dc_stream_release(vblank_work->stream);
1216
1217         kfree(vblank_work);
1218 }
1219
1220 #endif
1221
1222 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1223 {
1224         struct hpd_rx_irq_offload_work *offload_work;
1225         struct amdgpu_dm_connector *aconnector;
1226         struct dc_link *dc_link;
1227         struct amdgpu_device *adev;
1228         enum dc_connection_type new_connection_type = dc_connection_none;
1229         unsigned long flags;
1230
1231         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1232         aconnector = offload_work->offload_wq->aconnector;
1233
1234         if (!aconnector) {
1235                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1236                 goto skip;
1237         }
1238
1239         adev = drm_to_adev(aconnector->base.dev);
1240         dc_link = aconnector->dc_link;
1241
1242         mutex_lock(&aconnector->hpd_lock);
1243         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1244                 DRM_ERROR("KMS: Failed to detect connector\n");
1245         mutex_unlock(&aconnector->hpd_lock);
1246
1247         if (new_connection_type == dc_connection_none)
1248                 goto skip;
1249
1250         if (amdgpu_in_reset(adev))
1251                 goto skip;
1252
1253         mutex_lock(&adev->dm.dc_lock);
1254         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1255                 dc_link_dp_handle_automated_test(dc_link);
1256         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1257                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1258                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1259                 dc_link_dp_handle_link_loss(dc_link);
1260                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1261                 offload_work->offload_wq->is_handling_link_loss = false;
1262                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1263         }
1264         mutex_unlock(&adev->dm.dc_lock);
1265
1266 skip:
1267         kfree(offload_work);
1268
1269 }
1270
1271 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1272 {
1273         int max_caps = dc->caps.max_links;
1274         int i = 0;
1275         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1276
1277         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1278
1279         if (!hpd_rx_offload_wq)
1280                 return NULL;
1281
1282
1283         for (i = 0; i < max_caps; i++) {
1284                 hpd_rx_offload_wq[i].wq =
1285                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1286
1287                 if (hpd_rx_offload_wq[i].wq == NULL) {
1288                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1289                         return NULL;
1290                 }
1291
1292                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1293         }
1294
1295         return hpd_rx_offload_wq;
1296 }
1297
1298 static int amdgpu_dm_init(struct amdgpu_device *adev)
1299 {
1300         struct dc_init_data init_data;
1301 #ifdef CONFIG_DRM_AMD_DC_HDCP
1302         struct dc_callback_init init_params;
1303 #endif
1304         int r;
1305
1306         adev->dm.ddev = adev_to_drm(adev);
1307         adev->dm.adev = adev;
1308
1309         /* Zero all the fields */
1310         memset(&init_data, 0, sizeof(init_data));
1311 #ifdef CONFIG_DRM_AMD_DC_HDCP
1312         memset(&init_params, 0, sizeof(init_params));
1313 #endif
1314
1315         mutex_init(&adev->dm.dc_lock);
1316         mutex_init(&adev->dm.audio_lock);
1317 #if defined(CONFIG_DRM_AMD_DC_DCN)
1318         spin_lock_init(&adev->dm.vblank_lock);
1319 #endif
1320
1321         if (amdgpu_dm_irq_init(adev)) {
1322                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1323                 goto error;
1324         }
1325
1326         init_data.asic_id.chip_family = adev->family;
1327
1328         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1329         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1330         init_data.asic_id.chip_id = adev->pdev->device;
1331
1332         init_data.asic_id.vram_width = adev->gmc.vram_width;
1333         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1334         init_data.asic_id.atombios_base_address =
1335                 adev->mode_info.atom_context->bios;
1336
1337         init_data.driver = adev;
1338
1339         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1340
1341         if (!adev->dm.cgs_device) {
1342                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1343                 goto error;
1344         }
1345
1346         init_data.cgs_device = adev->dm.cgs_device;
1347
1348         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1349
1350         switch (adev->asic_type) {
1351         case CHIP_CARRIZO:
1352         case CHIP_STONEY:
1353                 init_data.flags.gpu_vm_support = true;
1354                 break;
1355         default:
1356                 switch (adev->ip_versions[DCE_HWIP][0]) {
1357                 case IP_VERSION(2, 1, 0):
1358                         init_data.flags.gpu_vm_support = true;
1359                         if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360                                 init_data.flags.disable_dmcu = true;
1361                         break;
1362                 case IP_VERSION(1, 0, 0):
1363                 case IP_VERSION(1, 0, 1):
1364                 case IP_VERSION(3, 0, 1):
1365                 case IP_VERSION(3, 1, 2):
1366                 case IP_VERSION(3, 1, 3):
1367                         init_data.flags.gpu_vm_support = true;
1368                         break;
1369                 case IP_VERSION(2, 0, 3):
1370                         init_data.flags.disable_dmcu = true;
1371                         break;
1372                 default:
1373                         break;
1374                 }
1375                 break;
1376         }
1377
1378         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1379                 init_data.flags.fbc_support = true;
1380
1381         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1382                 init_data.flags.multi_mon_pp_mclk_switch = true;
1383
1384         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1385                 init_data.flags.disable_fractional_pwm = true;
1386
1387         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1388                 init_data.flags.edp_no_power_sequencing = true;
1389
1390         init_data.flags.power_down_display_on_boot = true;
1391
1392         INIT_LIST_HEAD(&adev->dm.da_list);
1393         /* Display Core create. */
1394         adev->dm.dc = dc_create(&init_data);
1395
1396         if (adev->dm.dc) {
1397                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1398         } else {
1399                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1400                 goto error;
1401         }
1402
1403         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1404                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1405                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1406         }
1407
1408         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1409                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1410
1411         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1412                 adev->dm.dc->debug.disable_stutter = true;
1413
1414         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1415                 adev->dm.dc->debug.disable_dsc = true;
1416
1417         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1418                 adev->dm.dc->debug.disable_clock_gate = true;
1419
1420         r = dm_dmub_hw_init(adev);
1421         if (r) {
1422                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1423                 goto error;
1424         }
1425
1426         dc_hardware_init(adev->dm.dc);
1427
1428         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1429         if (!adev->dm.hpd_rx_offload_wq) {
1430                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1431                 goto error;
1432         }
1433
1434 #if defined(CONFIG_DRM_AMD_DC_DCN)
1435         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1436                 struct dc_phy_addr_space_config pa_config;
1437
1438                 mmhub_read_system_context(adev, &pa_config);
1439
1440                 // Call the DC init_memory func
1441                 dc_setup_system_context(adev->dm.dc, &pa_config);
1442         }
1443 #endif
1444
1445         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1446         if (!adev->dm.freesync_module) {
1447                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1448         } else {
1449                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1450                                  adev->dm.freesync_module);
1451         }
1452
1453         amdgpu_dm_init_color_mod();
1454
1455 #if defined(CONFIG_DRM_AMD_DC_DCN)
1456         if (adev->dm.dc->caps.max_links > 0) {
1457                 adev->dm.vblank_control_workqueue =
1458                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1459                 if (!adev->dm.vblank_control_workqueue)
1460                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1461         }
1462 #endif
1463
1464 #ifdef CONFIG_DRM_AMD_DC_HDCP
1465         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1466                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1467
1468                 if (!adev->dm.hdcp_workqueue)
1469                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1470                 else
1471                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1472
1473                 dc_init_callbacks(adev->dm.dc, &init_params);
1474         }
1475 #endif
1476 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1477         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1478 #endif
1479         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1480                 init_completion(&adev->dm.dmub_aux_transfer_done);
1481                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1482                 if (!adev->dm.dmub_notify) {
1483                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1484                         goto error;
1485                 }
1486
1487                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1488                 if (!adev->dm.delayed_hpd_wq) {
1489                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1490                         goto error;
1491                 }
1492
1493                 amdgpu_dm_outbox_init(adev);
1494 #if defined(CONFIG_DRM_AMD_DC_DCN)
1495                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1496                         dmub_aux_setconfig_callback, false)) {
1497                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1498                         goto error;
1499                 }
1500                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1501                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1502                         goto error;
1503                 }
1504 #endif
1505         }
1506
1507         if (amdgpu_dm_initialize_drm_device(adev)) {
1508                 DRM_ERROR(
1509                 "amdgpu: failed to initialize sw for display support.\n");
1510                 goto error;
1511         }
1512
1513         /* create fake encoders for MST */
1514         dm_dp_create_fake_mst_encoders(adev);
1515
1516         /* TODO: Add_display_info? */
1517
1518         /* TODO use dynamic cursor width */
1519         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1520         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1521
1522         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1523                 DRM_ERROR(
1524                 "amdgpu: failed to initialize vblank support.\n");
1525                 goto error;
1526         }
1527
1529         DRM_DEBUG_DRIVER("KMS initialized.\n");
1530
1531         return 0;
1532 error:
1533         amdgpu_dm_fini(adev);
1534
1535         return -EINVAL;
1536 }
1537
1538 static int amdgpu_dm_early_fini(void *handle)
1539 {
1540         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1541
1542         amdgpu_dm_audio_fini(adev);
1543
1544         return 0;
1545 }
1546
1547 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1548 {
1549         int i;
1550
1551 #if defined(CONFIG_DRM_AMD_DC_DCN)
1552         if (adev->dm.vblank_control_workqueue) {
1553                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1554                 adev->dm.vblank_control_workqueue = NULL;
1555         }
1556 #endif
1557
1558         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1559                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1560         }
1561
1562         amdgpu_dm_destroy_drm_device(&adev->dm);
1563
1564 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1565         if (adev->dm.crc_rd_wrk) {
1566                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1567                 kfree(adev->dm.crc_rd_wrk);
1568                 adev->dm.crc_rd_wrk = NULL;
1569         }
1570 #endif
1571 #ifdef CONFIG_DRM_AMD_DC_HDCP
1572         if (adev->dm.hdcp_workqueue) {
1573                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1574                 adev->dm.hdcp_workqueue = NULL;
1575         }
1576
1577         if (adev->dm.dc)
1578                 dc_deinit_callbacks(adev->dm.dc);
1579 #endif
1580
1581         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1582
1583         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1584                 kfree(adev->dm.dmub_notify);
1585                 adev->dm.dmub_notify = NULL;
1586                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1587                 adev->dm.delayed_hpd_wq = NULL;
1588         }
1589
1590         if (adev->dm.dmub_bo)
1591                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1592                                       &adev->dm.dmub_bo_gpu_addr,
1593                                       &adev->dm.dmub_bo_cpu_addr);
1594
1595         if (adev->dm.hpd_rx_offload_wq) {
1596                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1597                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1598                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1599                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1600                         }
1601                 }
1602
1603                 kfree(adev->dm.hpd_rx_offload_wq);
1604                 adev->dm.hpd_rx_offload_wq = NULL;
1605         }
1606
1607         /* DC Destroy TODO: Replace destroy DAL */
1608         if (adev->dm.dc)
1609                 dc_destroy(&adev->dm.dc);
1610         /*
1611          * TODO: pageflip, vlank interrupt
1612          *
1613          * amdgpu_dm_irq_fini(adev);
1614          */
1615
1616         if (adev->dm.cgs_device) {
1617                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1618                 adev->dm.cgs_device = NULL;
1619         }
1620         if (adev->dm.freesync_module) {
1621                 mod_freesync_destroy(adev->dm.freesync_module);
1622                 adev->dm.freesync_module = NULL;
1623         }
1624
1625         mutex_destroy(&adev->dm.audio_lock);
1626         mutex_destroy(&adev->dm.dc_lock);
1629 }
1630
1631 static int load_dmcu_fw(struct amdgpu_device *adev)
1632 {
1633         const char *fw_name_dmcu = NULL;
1634         int r;
1635         const struct dmcu_firmware_header_v1_0 *hdr;
1636
1637         switch (adev->asic_type) {
1638 #if defined(CONFIG_DRM_AMD_DC_SI)
1639         case CHIP_TAHITI:
1640         case CHIP_PITCAIRN:
1641         case CHIP_VERDE:
1642         case CHIP_OLAND:
1643 #endif
1644         case CHIP_BONAIRE:
1645         case CHIP_HAWAII:
1646         case CHIP_KAVERI:
1647         case CHIP_KABINI:
1648         case CHIP_MULLINS:
1649         case CHIP_TONGA:
1650         case CHIP_FIJI:
1651         case CHIP_CARRIZO:
1652         case CHIP_STONEY:
1653         case CHIP_POLARIS11:
1654         case CHIP_POLARIS10:
1655         case CHIP_POLARIS12:
1656         case CHIP_VEGAM:
1657         case CHIP_VEGA10:
1658         case CHIP_VEGA12:
1659         case CHIP_VEGA20:
1660                 return 0;
1661         case CHIP_NAVI12:
1662                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1663                 break;
1664         case CHIP_RAVEN:
1665                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1666                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1667                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1669                 else
1670                         return 0;
1671                 break;
1672         default:
1673                 switch (adev->ip_versions[DCE_HWIP][0]) {
1674                 case IP_VERSION(2, 0, 2):
1675                 case IP_VERSION(2, 0, 3):
1676                 case IP_VERSION(2, 0, 0):
1677                 case IP_VERSION(2, 1, 0):
1678                 case IP_VERSION(3, 0, 0):
1679                 case IP_VERSION(3, 0, 2):
1680                 case IP_VERSION(3, 0, 3):
1681                 case IP_VERSION(3, 0, 1):
1682                 case IP_VERSION(3, 1, 2):
1683                 case IP_VERSION(3, 1, 3):
1684                         return 0;
1685                 default:
1686                         break;
1687                 }
1688                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1689                 return -EINVAL;
1690         }
1691
1692         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1693                 DRM_DEBUG_KMS("dm: DMCU firmware not supported with direct or SMU loading\n");
1694                 return 0;
1695         }
1696
1697         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1698         if (r == -ENOENT) {
1699                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1700                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1701                 adev->dm.fw_dmcu = NULL;
1702                 return 0;
1703         }
1704         if (r) {
1705                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1706                         fw_name_dmcu);
1707                 return r;
1708         }
1709
1710         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1711         if (r) {
1712                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1713                         fw_name_dmcu);
1714                 release_firmware(adev->dm.fw_dmcu);
1715                 adev->dm.fw_dmcu = NULL;
1716                 return r;
1717         }
1718
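        /*
         * The DMCU image carries both the ERAM program and the interrupt
         * vector table, so two ucode regions are registered below: ERAM sized
         * as (total ucode size - interrupt vector size), then the vector.
         */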
1719         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1720         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1721         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1722         adev->firmware.fw_size +=
1723                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1724
1725         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1726         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1727         adev->firmware.fw_size +=
1728                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1729
1730         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1731
1732         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1733
1734         return 0;
1735 }
1736
1737 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1738 {
1739         struct amdgpu_device *adev = ctx;
1740
1741         return dm_read_reg(adev->dm.dc->ctx, address);
1742 }
1743
1744 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1745                                      uint32_t value)
1746 {
1747         struct amdgpu_device *adev = ctx;
1748
1749         return dm_write_reg(adev->dm.dc->ctx, address, value);
1750 }
1751
1752 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1753 {
1754         struct dmub_srv_create_params create_params;
1755         struct dmub_srv_region_params region_params;
1756         struct dmub_srv_region_info region_info;
1757         struct dmub_srv_fb_params fb_params;
1758         struct dmub_srv_fb_info *fb_info;
1759         struct dmub_srv *dmub_srv;
1760         const struct dmcub_firmware_header_v1_0 *hdr;
1761         const char *fw_name_dmub;
1762         enum dmub_asic dmub_asic;
1763         enum dmub_status status;
1764         int r;
1765
1766         switch (adev->ip_versions[DCE_HWIP][0]) {
1767         case IP_VERSION(2, 1, 0):
1768                 dmub_asic = DMUB_ASIC_DCN21;
1769                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1770                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1771                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1772                 break;
1773         case IP_VERSION(3, 0, 0):
1774                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1775                         dmub_asic = DMUB_ASIC_DCN30;
1776                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1777                 } else {
1778                         dmub_asic = DMUB_ASIC_DCN30;
1779                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1780                 }
1781                 break;
1782         case IP_VERSION(3, 0, 1):
1783                 dmub_asic = DMUB_ASIC_DCN301;
1784                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1785                 break;
1786         case IP_VERSION(3, 0, 2):
1787                 dmub_asic = DMUB_ASIC_DCN302;
1788                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1789                 break;
1790         case IP_VERSION(3, 0, 3):
1791                 dmub_asic = DMUB_ASIC_DCN303;
1792                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1793                 break;
1794         case IP_VERSION(3, 1, 2):
1795         case IP_VERSION(3, 1, 3):
1796                 dmub_asic = DMUB_ASIC_DCN31;
1797                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1798                 break;
1799
1800         default:
1801                 /* ASIC doesn't support DMUB. */
1802                 return 0;
1803         }
1804
1805         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1806         if (r) {
1807                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1808                 return 0;
1809         }
1810
1811         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1812         if (r) {
1813                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1814                 return 0;
1815         }
1816
1817         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1818         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1819
1820         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1821                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1822                         AMDGPU_UCODE_ID_DMCUB;
1823                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1824                         adev->dm.dmub_fw;
1825                 adev->firmware.fw_size +=
1826                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1827
1828                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1829                          adev->dm.dmcub_fw_version);
1830         }
1831
1833         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1834         dmub_srv = adev->dm.dmub_srv;
1835
1836         if (!dmub_srv) {
1837                 DRM_ERROR("Failed to allocate DMUB service!\n");
1838                 return -ENOMEM;
1839         }
1840
1841         memset(&create_params, 0, sizeof(create_params));
1842         create_params.user_ctx = adev;
1843         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1844         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1845         create_params.asic = dmub_asic;
1846
1847         /* Create the DMUB service. */
1848         status = dmub_srv_create(dmub_srv, &create_params);
1849         if (status != DMUB_STATUS_OK) {
1850                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1851                 return -EINVAL;
1852         }
1853
1854         /* Calculate the size of all the regions for the DMUB service. */
1855         memset(&region_params, 0, sizeof(region_params));
1856
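        /*
         * Layout of the DMUB firmware image, as implied by the offsets used
         * below: [PSP header | instruction/constant data | PSP footer | BSS +
         * data]. fw_inst_const skips the PSP header and excludes the footer
         * from its size; fw_bss_data starts right after the inst/const bytes.
         */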
1857         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1858                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1859         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1860         region_params.vbios_size = adev->bios_size;
1861         region_params.fw_bss_data = region_params.bss_data_size ?
1862                 adev->dm.dmub_fw->data +
1863                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1864                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1865         region_params.fw_inst_const =
1866                 adev->dm.dmub_fw->data +
1867                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1868                 PSP_HEADER_BYTES;
1869
1870         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1871                                            &region_info);
1872
1873         if (status != DMUB_STATUS_OK) {
1874                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1875                 return -EINVAL;
1876         }
1877
1878         /*
1879          * Allocate a framebuffer based on the total size of all the regions.
1880          * TODO: Move this into GART.
1881          */
1882         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1883                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1884                                     &adev->dm.dmub_bo_gpu_addr,
1885                                     &adev->dm.dmub_bo_cpu_addr);
1886         if (r)
1887                 return r;
1888
1889         /* Rebase the regions on the framebuffer address. */
1890         memset(&fb_params, 0, sizeof(fb_params));
1891         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1892         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1893         fb_params.region_info = &region_info;
1894
1895         adev->dm.dmub_fb_info =
1896                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1897         fb_info = adev->dm.dmub_fb_info;
1898
1899         if (!fb_info) {
1900                 DRM_ERROR(
1901                         "Failed to allocate framebuffer info for DMUB service!\n");
1902                 return -ENOMEM;
1903         }
1904
1905         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1906         if (status != DMUB_STATUS_OK) {
1907                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1908                 return -EINVAL;
1909         }
1910
1911         return 0;
1912 }
1913
1914 static int dm_sw_init(void *handle)
1915 {
1916         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1917         int r;
1918
1919         r = dm_dmub_sw_init(adev);
1920         if (r)
1921                 return r;
1922
1923         return load_dmcu_fw(adev);
1924 }
1925
1926 static int dm_sw_fini(void *handle)
1927 {
1928         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1929
1930         kfree(adev->dm.dmub_fb_info);
1931         adev->dm.dmub_fb_info = NULL;
1932
1933         if (adev->dm.dmub_srv) {
1934                 dmub_srv_destroy(adev->dm.dmub_srv);
1935                 adev->dm.dmub_srv = NULL;
1936         }
1937
1938         release_firmware(adev->dm.dmub_fw);
1939         adev->dm.dmub_fw = NULL;
1940
1941         release_firmware(adev->dm.fw_dmcu);
1942         adev->dm.fw_dmcu = NULL;
1943
1944         return 0;
1945 }
1946
1947 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1948 {
1949         struct amdgpu_dm_connector *aconnector;
1950         struct drm_connector *connector;
1951         struct drm_connector_list_iter iter;
1952         int ret = 0;
1953
1954         drm_connector_list_iter_begin(dev, &iter);
1955         drm_for_each_connector_iter(connector, &iter) {
1956                 aconnector = to_amdgpu_dm_connector(connector);
1957                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1958                     aconnector->mst_mgr.aux) {
1959                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1960                                          aconnector,
1961                                          aconnector->base.base.id);
1962
1963                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1964                         if (ret < 0) {
1965                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1966                                 aconnector->dc_link->type =
1967                                         dc_connection_single;
1968                                 break;
1969                         }
1970                 }
1971         }
1972         drm_connector_list_iter_end(&iter);
1973
1974         return ret;
1975 }
1976
1977 static int dm_late_init(void *handle)
1978 {
1979         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1980
1981         struct dmcu_iram_parameters params;
1982         unsigned int linear_lut[16];
1983         int i;
1984         struct dmcu *dmcu = NULL;
1985
1986         dmcu = adev->dm.dc->res_pool->dmcu;
1987
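        /* Build a linear 16-entry backlight transfer LUT spanning 0..0xFFFF. */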
1988         for (i = 0; i < 16; i++)
1989                 linear_lut[i] = 0xFFFF * i / 15;
1990
1991         params.set = 0;
1992         params.backlight_ramping_override = false;
1993         params.backlight_ramping_start = 0xCCCC;
1994         params.backlight_ramping_reduction = 0xCCCCCCCC;
1995         params.backlight_lut_array_size = 16;
1996         params.backlight_lut_array = linear_lut;
1997
1998         /* Min backlight level after ABM reduction; don't allow below 1%:
1999          * 0xFFFF * 0.01 = 0x28F
2000          */
2001         params.min_abm_backlight = 0x28F;
2002         /* In the case where ABM is implemented on dmcub,
2003          * the dmcu object will be null.
2004          * ABM 2.4 and up are implemented on dmcub.
2005          */
2006         if (dmcu) {
2007                 if (!dmcu_load_iram(dmcu, params))
2008                         return -EINVAL;
2009         } else if (adev->dm.dc->ctx->dmub_srv) {
2010                 struct dc_link *edp_links[MAX_NUM_EDP];
2011                 int edp_num;
2012
2013                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2014                 for (i = 0; i < edp_num; i++) {
2015                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2016                                 return -EINVAL;
2017                 }
2018         }
2019
2020         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2021 }
2022
2023 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2024 {
2025         struct amdgpu_dm_connector *aconnector;
2026         struct drm_connector *connector;
2027         struct drm_connector_list_iter iter;
2028         struct drm_dp_mst_topology_mgr *mgr;
2029         int ret;
2030         bool need_hotplug = false;
2031
2032         drm_connector_list_iter_begin(dev, &iter);
2033         drm_for_each_connector_iter(connector, &iter) {
2034                 aconnector = to_amdgpu_dm_connector(connector);
2035                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2036                     aconnector->mst_port)
2037                         continue;
2038
2039                 mgr = &aconnector->mst_mgr;
2040
2041                 if (suspend) {
2042                         drm_dp_mst_topology_mgr_suspend(mgr);
2043                 } else {
2044                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2045                         if (ret < 0) {
2046                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2047                                 need_hotplug = true;
2048                         }
2049                 }
2050         }
2051         drm_connector_list_iter_end(&iter);
2052
2053         if (need_hotplug)
2054                 drm_kms_helper_hotplug_event(dev);
2055 }
2056
2057 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2058 {
2059         struct smu_context *smu = &adev->smu;
2060         int ret = 0;
2061
2062         if (!is_support_sw_smu(adev))
2063                 return 0;
2064
2065         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2066          * depends on the Windows driver dc implementation.
2067          * For Navi1x, the clock settings of dcn watermarks are fixed; the
2068          * settings should be passed to smu during boot up and resume from s3.
2069          * boot up: dc calculates the dcn watermark clock settings within
2070          * dc_create, dcn20_resource_construct,
2071          * then calls the pplib functions below to pass the settings to smu:
2072          * smu_set_watermarks_for_clock_ranges
2073          * smu_set_watermarks_table
2074          * navi10_set_watermarks_table
2075          * smu_write_watermarks_table
2076          *
2077          * For Renoir, the clock settings of the dcn watermarks are also
2078          * fixed values. dc has implemented a different flow for the Windows driver:
2079          * dc_hardware_init / dc_set_power_state
2080          * dcn10_init_hw
2081          * notify_wm_ranges
2082          * set_wm_ranges
2083          * -- Linux
2084          * smu_set_watermarks_for_clock_ranges
2085          * renoir_set_watermarks_table
2086          * smu_write_watermarks_table
2087          *
2088          * For Linux,
2089          * dc_hardware_init -> amdgpu_dm_init
2090          * dc_set_power_state --> dm_resume
2091          *
2092          * therefore, this function applies to navi10/12/14 but
2093          * not to Renoir.
2094          */
2095         switch (adev->ip_versions[DCE_HWIP][0]) {
2096         case IP_VERSION(2, 0, 2):
2097         case IP_VERSION(2, 0, 0):
2098                 break;
2099         default:
2100                 return 0;
2101         }
2102
2103         ret = smu_write_watermarks_table(smu);
2104         if (ret) {
2105                 DRM_ERROR("Failed to update WMTABLE!\n");
2106                 return ret;
2107         }
2108
2109         return 0;
2110 }
2111
2112 /**
2113  * dm_hw_init() - Initialize DC device
2114  * @handle: The base driver device containing the amdgpu_dm device.
2115  *
2116  * Initialize the &struct amdgpu_display_manager device. This involves calling
2117  * the initializers of each DM component, then populating the struct with them.
2118  *
2119  * Although the function implies hardware initialization, both hardware and
2120  * software are initialized here. Splitting them out to their relevant init
2121  * hooks is a future TODO item.
2122  *
2123  * Some notable things that are initialized here:
2124  *
2125  * - Display Core, both software and hardware
2126  * - DC modules that we need (freesync and color management)
2127  * - DRM software states
2128  * - Interrupt sources and handlers
2129  * - Vblank support
2130  * - Debug FS entries, if enabled
2131  */
2132 static int dm_hw_init(void *handle)
2133 {
2134         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2135         /* Create DAL display manager */
2136         amdgpu_dm_init(adev);
2137         amdgpu_dm_hpd_init(adev);
2138
2139         return 0;
2140 }
2141
2142 /**
2143  * dm_hw_fini() - Teardown DC device
2144  * @handle: The base driver device containing the amdgpu_dm device.
2145  *
2146  * Teardown components within &struct amdgpu_display_manager that require
2147  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2148  * were loaded. Also flush IRQ workqueues and disable them.
2149  */
2150 static int dm_hw_fini(void *handle)
2151 {
2152         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2153
2154         amdgpu_dm_hpd_fini(adev);
2155
2156         amdgpu_dm_irq_fini(adev);
2157         amdgpu_dm_fini(adev);
2158         return 0;
2159 }
2160
2162 static int dm_enable_vblank(struct drm_crtc *crtc);
2163 static void dm_disable_vblank(struct drm_crtc *crtc);
2164
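/*
 * dm_gpureset_toggle_interrupts() - enable or disable the pageflip and vblank
 * interrupts of every active stream in @state; used around GPU reset.
 */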
2165 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2166                                  struct dc_state *state, bool enable)
2167 {
2168         enum dc_irq_source irq_source;
2169         struct amdgpu_crtc *acrtc;
2170         int rc = -EBUSY;
2171         int i = 0;
2172
2173         for (i = 0; i < state->stream_count; i++) {
2174                 acrtc = get_crtc_by_otg_inst(
2175                                 adev, state->stream_status[i].primary_otg_inst);
2176
2177                 if (acrtc && state->stream_status[i].plane_count != 0) {
2178                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2179                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2180                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2181                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2182                         if (rc)
2183                                 DRM_WARN("Failed to %s pflip interrupts\n",
2184                                          enable ? "enable" : "disable");
2185
2186                         if (enable) {
2187                                 rc = dm_enable_vblank(&acrtc->base);
2188                                 if (rc)
2189                                         DRM_WARN("Failed to enable vblank interrupts\n");
2190                         } else {
2191                                 dm_disable_vblank(&acrtc->base);
2192                         }
2193
2194                 }
2195         }
2196
2197 }
2198
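/*
 * amdgpu_dm_commit_zero_streams() - commit a copy of the current DC state
 * with every plane and stream removed, blanking all pipes; used on suspend
 * while a GPU reset is in progress.
 */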
2199 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2200 {
2201         struct dc_state *context = NULL;
2202         enum dc_status res = DC_ERROR_UNEXPECTED;
2203         int i;
2204         struct dc_stream_state *del_streams[MAX_PIPES];
2205         int del_streams_count = 0;
2206
2207         memset(del_streams, 0, sizeof(del_streams));
2208
2209         context = dc_create_state(dc);
2210         if (context == NULL)
2211                 goto context_alloc_fail;
2212
2213         dc_resource_state_copy_construct_current(dc, context);
2214
2215         /* First remove from context all streams */
2216         for (i = 0; i < context->stream_count; i++) {
2217                 struct dc_stream_state *stream = context->streams[i];
2218
2219                 del_streams[del_streams_count++] = stream;
2220         }
2221
2222         /* Remove all planes for removed streams and then remove the streams */
2223         for (i = 0; i < del_streams_count; i++) {
2224                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2225                         res = DC_FAIL_DETACH_SURFACES;
2226                         goto fail;
2227                 }
2228
2229                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2230                 if (res != DC_OK)
2231                         goto fail;
2232         }
2233
2235         res = dc_validate_global_state(dc, context, false);
2236
2237         if (res != DC_OK) {
2238                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2239                 goto fail;
2240         }
2241
2242         res = dc_commit_state(dc, context);
2243
2244 fail:
2245         dc_release_state(context);
2246
2247 context_alloc_fail:
2248         return res;
2249 }
2250
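/* Flush any in-flight HPD RX offload work before the hardware goes down. */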
2251 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2252 {
2253         int i;
2254
2255         if (dm->hpd_rx_offload_wq) {
2256                 for (i = 0; i < dm->dc->caps.max_links; i++)
2257                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2258         }
2259 }
2260
2261 static int dm_suspend(void *handle)
2262 {
2263         struct amdgpu_device *adev = handle;
2264         struct amdgpu_display_manager *dm = &adev->dm;
2265         int ret = 0;
2266
2267         if (amdgpu_in_reset(adev)) {
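                /*
                 * Note: dc_lock is intentionally left held across the GPU
                 * reset; the matching mutex_unlock() is in the
                 * amdgpu_in_reset() branch of dm_resume().
                 */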
2268                 mutex_lock(&dm->dc_lock);
2269
2270 #if defined(CONFIG_DRM_AMD_DC_DCN)
2271                 dc_allow_idle_optimizations(adev->dm.dc, false);
2272 #endif
2273
2274                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2275
2276                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2277
2278                 amdgpu_dm_commit_zero_streams(dm->dc);
2279
2280                 amdgpu_dm_irq_suspend(adev);
2281
2282                 hpd_rx_irq_work_suspend(dm);
2283
2284                 return ret;
2285         }
2286
2287         WARN_ON(adev->dm.cached_state);
2288         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2289
2290         s3_handle_mst(adev_to_drm(adev), true);
2291
2292         amdgpu_dm_irq_suspend(adev);
2293
2294         hpd_rx_irq_work_suspend(dm);
2295
2296         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2297
2298         return 0;
2299 }
2300
2301 static struct amdgpu_dm_connector *
2302 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2303                                              struct drm_crtc *crtc)
2304 {
2305         uint32_t i;
2306         struct drm_connector_state *new_con_state;
2307         struct drm_connector *connector;
2308         struct drm_crtc *crtc_from_state;
2309
2310         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2311                 crtc_from_state = new_con_state->crtc;
2312
2313                 if (crtc_from_state == crtc)
2314                         return to_amdgpu_dm_connector(connector);
2315         }
2316
2317         return NULL;
2318 }
2319
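/*
 * emulated_link_detect() - stand-in for real link detection on forced
 * connectors: derive the sink caps from the connector signal type, create a
 * sink, and read the locally overridden EDID.
 */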
2320 static void emulated_link_detect(struct dc_link *link)
2321 {
2322         struct dc_sink_init_data sink_init_data = { 0 };
2323         struct display_sink_capability sink_caps = { 0 };
2324         enum dc_edid_status edid_status;
2325         struct dc_context *dc_ctx = link->ctx;
2326         struct dc_sink *sink = NULL;
2327         struct dc_sink *prev_sink = NULL;
2328
2329         link->type = dc_connection_none;
2330         prev_sink = link->local_sink;
2331
2332         if (prev_sink)
2333                 dc_sink_release(prev_sink);
2334
2335         switch (link->connector_signal) {
2336         case SIGNAL_TYPE_HDMI_TYPE_A: {
2337                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2338                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2339                 break;
2340         }
2341
2342         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2343                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2344                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2345                 break;
2346         }
2347
2348         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2349                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2350                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2351                 break;
2352         }
2353
2354         case SIGNAL_TYPE_LVDS: {
2355                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2356                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2357                 break;
2358         }
2359
2360         case SIGNAL_TYPE_EDP: {
2361                 sink_caps.transaction_type =
2362                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2363                 sink_caps.signal = SIGNAL_TYPE_EDP;
2364                 break;
2365         }
2366
2367         case SIGNAL_TYPE_DISPLAY_PORT: {
2368                 sink_caps.transaction_type =
2369                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2370                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2371                 break;
2372         }
2373
2374         default:
2375                 DC_ERROR("Invalid connector type! signal:%d\n",
2376                         link->connector_signal);
2377                 return;
2378         }
2379
2380         sink_init_data.link = link;
2381         sink_init_data.sink_signal = sink_caps.signal;
2382
2383         sink = dc_sink_create(&sink_init_data);
2384         if (!sink) {
2385                 DC_ERROR("Failed to create sink!\n");
2386                 return;
2387         }
2388
2389         /* dc_sink_create returns a new reference */
2390         link->local_sink = sink;
2391
2392         edid_status = dm_helpers_read_local_edid(
2393                         link->ctx,
2394                         link,
2395                         sink);
2396
2397         if (edid_status != EDID_OK)
2398                 DC_ERROR("Failed to read EDID\n");
2400 }
2401
2402 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2403                                      struct amdgpu_display_manager *dm)
2404 {
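        /*
         * The update bundle is heap-allocated: the MAX_SURFACES-sized arrays
         * below would otherwise occupy too much kernel stack.
         */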
2405         struct {
2406                 struct dc_surface_update surface_updates[MAX_SURFACES];
2407                 struct dc_plane_info plane_infos[MAX_SURFACES];
2408                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2409                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2410                 struct dc_stream_update stream_update;
2411         } *bundle;
2412         int k, m;
2413
2414         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2415
2416         if (!bundle) {
2417                 dm_error("Failed to allocate update bundle\n");
2418                 goto cleanup;
2419         }
2420
2421         for (k = 0; k < dc_state->stream_count; k++) {
2422                 bundle->stream_update.stream = dc_state->streams[k];
2423
2424                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2425                         bundle->surface_updates[m].surface =
2426                                 dc_state->stream_status[k].plane_states[m];
2427                         bundle->surface_updates[m].surface->force_full_update =
2428                                 true;
2429                 }
2430                 dc_commit_updates_for_stream(
2431                         dm->dc, bundle->surface_updates,
2432                         dc_state->stream_status[k].plane_count,
2433                         dc_state->streams[k], &bundle->stream_update, dc_state);
2434         }
2435
2436 cleanup:
2437         kfree(bundle);
2440 }
2441
2442 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2443 {
2444         struct dc_stream_state *stream_state;
2445         struct amdgpu_dm_connector *aconnector = link->priv;
2446         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2447         struct dc_stream_update stream_update;
2448         bool dpms_off = true;
2449
2450         memset(&stream_update, 0, sizeof(stream_update));
2451         stream_update.dpms_off = &dpms_off;
2452
2453         mutex_lock(&adev->dm.dc_lock);
2454         stream_state = dc_stream_find_from_link(link);
2455
2456         if (stream_state == NULL) {
2457                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2458                 mutex_unlock(&adev->dm.dc_lock);
2459                 return;
2460         }
2461
2462         stream_update.stream = stream_state;
2463         acrtc_state->force_dpms_off = true;
2464         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2465                                      stream_state, &stream_update,
2466                                      stream_state->ctx->dc->current_state);
2467         mutex_unlock(&adev->dm.dc_lock);
2468 }
2469
2470 static int dm_resume(void *handle)
2471 {
2472         struct amdgpu_device *adev = handle;
2473         struct drm_device *ddev = adev_to_drm(adev);
2474         struct amdgpu_display_manager *dm = &adev->dm;
2475         struct amdgpu_dm_connector *aconnector;
2476         struct drm_connector *connector;
2477         struct drm_connector_list_iter iter;
2478         struct drm_crtc *crtc;
2479         struct drm_crtc_state *new_crtc_state;
2480         struct dm_crtc_state *dm_new_crtc_state;
2481         struct drm_plane *plane;
2482         struct drm_plane_state *new_plane_state;
2483         struct dm_plane_state *dm_new_plane_state;
2484         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2485         enum dc_connection_type new_connection_type = dc_connection_none;
2486         struct dc_state *dc_state;
2487         int i, r, j;
2488
2489         if (amdgpu_in_reset(adev)) {
2490                 dc_state = dm->cached_dc_state;
2491
2492                 r = dm_dmub_hw_init(adev);
2493                 if (r)
2494                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2495
2496                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2497                 dc_resume(dm->dc);
2498
2499                 amdgpu_dm_irq_resume_early(adev);
2500
2501                 for (i = 0; i < dc_state->stream_count; i++) {
2502                         dc_state->streams[i]->mode_changed = true;
2503                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2504                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2505                                         = 0xffffffff;
2506                         }
2507                 }
2508 #if defined(CONFIG_DRM_AMD_DC_DCN)
2509                 /*
2510                  * Resource allocation happens for link encoders for newer ASIC in
2511                  * dc_validate_global_state, so we need to revalidate it.
2512                  *
2513                  * This shouldn't fail (it passed once before), so warn if it does.
2514                  */
2515                 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2516 #endif
2517
2518                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2519
2520                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2521
2522                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2523
2524                 dc_release_state(dm->cached_dc_state);
2525                 dm->cached_dc_state = NULL;
2526
2527                 amdgpu_dm_irq_resume_late(adev);
2528
2529                 mutex_unlock(&dm->dc_lock);
2530
2531                 return 0;
2532         }
2533         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2534         dc_release_state(dm_state->context);
2535         dm_state->context = dc_create_state(dm->dc);
2536         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2537         dc_resource_state_construct(dm->dc, dm_state->context);
2538
2539         /* Before powering on DC we need to re-initialize DMUB. */
2540         r = dm_dmub_hw_init(adev);
2541         if (r)
2542                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2543
2544         /* power on hardware */
2545         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2546
2547         /* program HPD filter */
2548         dc_resume(dm->dc);
2549
2550         /*
2551          * early enable HPD Rx IRQ, should be done before set mode as short
2552          * pulse interrupts are used for MST
2553          */
2554         amdgpu_dm_irq_resume_early(adev);
2555
2556         /* On resume we need to rewrite the MSTM control bits to enable MST */
2557         s3_handle_mst(ddev, false);
2558
2559         /* Do detection */
2560         drm_connector_list_iter_begin(ddev, &iter);
2561         drm_for_each_connector_iter(connector, &iter) {
2562                 aconnector = to_amdgpu_dm_connector(connector);
2563
2564                 /*
2565                  * this is the case when traversing through already created
2566                  * MST connectors, should be skipped
2567                  */
2568                 if (aconnector->mst_port)
2569                         continue;
2570
2571                 mutex_lock(&aconnector->hpd_lock);
2572                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2573                         DRM_ERROR("KMS: Failed to detect connector\n");
2574
2575                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2576                         emulated_link_detect(aconnector->dc_link);
2577                 else
2578                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2579
2580                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2581                         aconnector->fake_enable = false;
2582
2583                 if (aconnector->dc_sink)
2584                         dc_sink_release(aconnector->dc_sink);
2585                 aconnector->dc_sink = NULL;
2586                 amdgpu_dm_update_connector_after_detect(aconnector);
2587                 mutex_unlock(&aconnector->hpd_lock);
2588         }
2589         drm_connector_list_iter_end(&iter);
2590
2591         /* Force mode set in atomic commit */
2592         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2593                 new_crtc_state->active_changed = true;
2594
2595         /*
2596          * atomic_check is expected to create the dc states. We need to release
2597          * them here, since they were duplicated as part of the suspend
2598          * procedure.
2599          */
2600         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2601                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2602                 if (dm_new_crtc_state->stream) {
2603                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2604                         dc_stream_release(dm_new_crtc_state->stream);
2605                         dm_new_crtc_state->stream = NULL;
2606                 }
2607         }
2608
2609         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2610                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2611                 if (dm_new_plane_state->dc_state) {
2612                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2613                         dc_plane_state_release(dm_new_plane_state->dc_state);
2614                         dm_new_plane_state->dc_state = NULL;
2615                 }
2616         }
2617
2618         drm_atomic_helper_resume(ddev, dm->cached_state);
2619
2620         dm->cached_state = NULL;
2621
2622         amdgpu_dm_irq_resume_late(adev);
2623
2624         amdgpu_dm_smu_write_watermarks_table(adev);
2625
2626         return 0;
2627 }
2628
2629 /**
2630  * DOC: DM Lifecycle
2631  *
2632  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2633  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2634  * the base driver's device list to be initialized and torn down accordingly.
2635  *
2636  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2637  */
2638
2639 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2640         .name = "dm",
2641         .early_init = dm_early_init,
2642         .late_init = dm_late_init,
2643         .sw_init = dm_sw_init,
2644         .sw_fini = dm_sw_fini,
2645         .early_fini = amdgpu_dm_early_fini,
2646         .hw_init = dm_hw_init,
2647         .hw_fini = dm_hw_fini,
2648         .suspend = dm_suspend,
2649         .resume = dm_resume,
2650         .is_idle = dm_is_idle,
2651         .wait_for_idle = dm_wait_for_idle,
2652         .check_soft_reset = dm_check_soft_reset,
2653         .soft_reset = dm_soft_reset,
2654         .set_clockgating_state = dm_set_clockgating_state,
2655         .set_powergating_state = dm_set_powergating_state,
2656 };
2657
2658 const struct amdgpu_ip_block_version dm_ip_block =
2659 {
2660         .type = AMD_IP_BLOCK_TYPE_DCE,
2661         .major = 1,
2662         .minor = 0,
2663         .rev = 0,
2664         .funcs = &amdgpu_dm_funcs,
2665 };
2666
2668 /**
2669  * DOC: atomic
2670  *
2671  * *WIP*
2672  */
2673
2674 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2675         .fb_create = amdgpu_display_user_framebuffer_create,
2676         .get_format_info = amd_get_format_info,
2677         .output_poll_changed = drm_fb_helper_output_poll_changed,
2678         .atomic_check = amdgpu_dm_atomic_check,
2679         .atomic_commit = drm_atomic_helper_commit,
2680 };
2681
2682 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2683         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2684 };
2685
2686 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2687 {
2688         u32 max_cll, min_cll, max, min, q, r;
2689         struct amdgpu_dm_backlight_caps *caps;
2690         struct amdgpu_display_manager *dm;
2691         struct drm_connector *conn_base;
2692         struct amdgpu_device *adev;
2693         struct dc_link *link = NULL;
2694         static const u8 pre_computed_values[] = {
2695                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2696                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2697         int i;
2698
2699         if (!aconnector || !aconnector->dc_link)
2700                 return;
2701
2702         link = aconnector->dc_link;
2703         if (link->connector_signal != SIGNAL_TYPE_EDP)
2704                 return;
2705
2706         conn_base = &aconnector->base;
2707         adev = drm_to_adev(conn_base->dev);
2708         dm = &adev->dm;
2709         for (i = 0; i < dm->num_of_edps; i++) {
2710                 if (link == dm->backlight_link[i])
2711                         break;
2712         }
2713         if (i >= dm->num_of_edps)
2714                 return;
2715         caps = &dm->backlight_caps[i];
2716         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2717         caps->aux_support = false;
2718         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2719         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2720
2721         if (caps->ext_caps->bits.oled == 1 /*||
2722             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2723             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2724                 caps->aux_support = true;
2725
2726         if (amdgpu_backlight == 0)
2727                 caps->aux_support = false;
2728         else if (amdgpu_backlight == 1)
2729                 caps->aux_support = true;
2730
2731         /* From the specification (CTA-861-G), for calculating the maximum
2732          * luminance we need to use:
2733          *      Luminance = 50*2**(CV/32)
2734          * Where CV is a one-byte value.
2735          * Calculating this expression directly would need floating-point
2736          * precision; to avoid that complexity, we take advantage of the fact
2737          * that CV is divided by a constant. From Euclid's division algorithm,
2738          * we know that CV can be written as: CV = 32*q + r. Substituting into
2739          * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2740          * need to pre-compute the values of 50*2**(r/32). For pre-computing
2741          * them we used the following Ruby line:
2742          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2743          * The results of the above expression can be verified against
2744          * pre_computed_values.
2745          */
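        /*
         * Worked example (hypothetical max_cll of 70): q = 70 / 32 = 2 and
         * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
         * = 4 * 57 = 228, matching round(50 * 2**(70/32)) ~= 228 nits.
         */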
2746         q = max_cll >> 5;
2747         r = max_cll % 32;
2748         max = (1 << q) * pre_computed_values[r];
2749
2750         /* min luminance: maxLum * (CV/255)^2 / 100, evaluated as one fixed-
2751          * point expression so the fractional CV/255 term is not lost. */
2752         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2753
2754         caps->aux_max_input_signal = max;
2755         caps->aux_min_input_signal = min;
2756 }
2757
2758 void amdgpu_dm_update_connector_after_detect(
2759                 struct amdgpu_dm_connector *aconnector)
2760 {
2761         struct drm_connector *connector = &aconnector->base;
2762         struct drm_device *dev = connector->dev;
2763         struct dc_sink *sink;
2764
2765         /* MST handled by drm_mst framework */
2766         if (aconnector->mst_mgr.mst_state)
2767                 return;
2768
2769         sink = aconnector->dc_link->local_sink;
2770         if (sink)
2771                 dc_sink_retain(sink);
2772
2773         /*
2774          * An EDID-managed connector gets its first update only in the mode_valid
2775          * hook; the connector sink is then set to either a fake or a physical sink,
2776          * depending on link status. Skip if already done during boot.
2777          */
2778         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2779                         && aconnector->dc_em_sink) {
2780
2781                 /*
2782                  * For S3 resume with a headless config, use the emulated sink
2783                  * (dc_em_sink) to fake a stream, because connector->sink is NULL on resume.
2784                  */
2785                 mutex_lock(&dev->mode_config.mutex);
2786
2787                 if (sink) {
2788                         if (aconnector->dc_sink) {
2789                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2790                                 /*
2791                                  * The retain and release below bump the sink's refcount,
2792                                  * because the link no longer points to it after disconnect;
2793                                  * otherwise the next crtc-to-connector reshuffle by UMD
2794                                  * would trigger an unwanted dc_sink release.
2795                                  */
2796                                 dc_sink_release(aconnector->dc_sink);
2797                         }
2798                         aconnector->dc_sink = sink;
2799                         dc_sink_retain(aconnector->dc_sink);
2800                         amdgpu_dm_update_freesync_caps(connector,
2801                                         aconnector->edid);
2802                 } else {
2803                         amdgpu_dm_update_freesync_caps(connector, NULL);
2804                         if (!aconnector->dc_sink) {
2805                                 aconnector->dc_sink = aconnector->dc_em_sink;
2806                                 dc_sink_retain(aconnector->dc_sink);
2807                         }
2808                 }
2809
2810                 mutex_unlock(&dev->mode_config.mutex);
2811
2812                 if (sink)
2813                         dc_sink_release(sink);
2814                 return;
2815         }
2816
2817         /*
2818          * TODO: temporary guard until a proper fix is found.
2819          * If this sink is an MST sink, do nothing.
2820          */
2821         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2822                 dc_sink_release(sink);
2823                 return;
2824         }
2825
2826         if (aconnector->dc_sink == sink) {
2827                 /*
2828                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2829                  * Do nothing.
2830                  */
2831                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2832                                 aconnector->connector_id);
2833                 if (sink)
2834                         dc_sink_release(sink);
2835                 return;
2836         }
2837
2838         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2839                 aconnector->connector_id, aconnector->dc_sink, sink);
2840
2841         mutex_lock(&dev->mode_config.mutex);
2842
2843         /*
2844          * 1. Update status of the drm connector
2845          * 2. Send an event and let userspace tell us what to do
2846          */
2847         if (sink) {
2848                 /*
2849                  * TODO: check if we still need the S3 mode update workaround.
2850                  * If yes, put it here.
2851                  */
2852                 if (aconnector->dc_sink) {
2853                         amdgpu_dm_update_freesync_caps(connector, NULL);
2854                         dc_sink_release(aconnector->dc_sink);
2855                 }
2856
2857                 aconnector->dc_sink = sink;
2858                 dc_sink_retain(aconnector->dc_sink);
2859                 if (sink->dc_edid.length == 0) {
2860                         aconnector->edid = NULL;
2861                         if (aconnector->dc_link->aux_mode) {
2862                                 drm_dp_cec_unset_edid(
2863                                         &aconnector->dm_dp_aux.aux);
2864                         }
2865                 } else {
2866                         aconnector->edid =
2867                                 (struct edid *)sink->dc_edid.raw_edid;
2868
2869                         drm_connector_update_edid_property(connector,
2870                                                            aconnector->edid);
2871                         if (aconnector->dc_link->aux_mode)
2872                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2873                                                     aconnector->edid);
2874                 }
2875
2876                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2877                 update_connector_ext_caps(aconnector);
2878         } else {
2879                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2880                 amdgpu_dm_update_freesync_caps(connector, NULL);
2881                 drm_connector_update_edid_property(connector, NULL);
2882                 aconnector->num_modes = 0;
2883                 dc_sink_release(aconnector->dc_sink);
2884                 aconnector->dc_sink = NULL;
2885                 aconnector->edid = NULL;
2886 #ifdef CONFIG_DRM_AMD_DC_HDCP
2887                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2888                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2889                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2890 #endif
2891         }
2892
2893         mutex_unlock(&dev->mode_config.mutex);
2894
2895         update_subconnector_property(aconnector);
2896
2897         if (sink)
2898                 dc_sink_release(sink);
2899 }
2900
2901 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2902 {
2903         struct drm_connector *connector = &aconnector->base;
2904         struct drm_device *dev = connector->dev;
2905         enum dc_connection_type new_connection_type = dc_connection_none;
2906         struct amdgpu_device *adev = drm_to_adev(dev);
2907         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2908         struct dm_crtc_state *dm_crtc_state = NULL;
2909
2910         if (adev->dm.disable_hpd_irq)
2911                 return;
2912
2913         if (dm_con_state->base.state && dm_con_state->base.crtc)
2914                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2915                                         dm_con_state->base.state,
2916                                         dm_con_state->base.crtc));
2917         /*
2918          * On failure, or for MST, there is no need to update the connector
2919          * status or notify the OS, since MST handles this in its own context.
2920          */
2921         mutex_lock(&aconnector->hpd_lock);
2922
2923 #ifdef CONFIG_DRM_AMD_DC_HDCP
2924         if (adev->dm.hdcp_workqueue) {
2925                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2926                 dm_con_state->update_hdcp = true;
2927         }
2928 #endif
2929         if (aconnector->fake_enable)
2930                 aconnector->fake_enable = false;
2931
2932         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2933                 DRM_ERROR("KMS: Failed to detect connector\n");
2934
2935         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2936                 emulated_link_detect(aconnector->dc_link);
2937
2938                 drm_modeset_lock_all(dev);
2939                 dm_restore_drm_connector_state(dev, connector);
2940                 drm_modeset_unlock_all(dev);
2941
2942                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2943                         drm_kms_helper_hotplug_event(dev);
2944
2945         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2946                 if (new_connection_type == dc_connection_none &&
2947                     aconnector->dc_link->type == dc_connection_none &&
2948                     dm_crtc_state)
2949                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2950
2951                 amdgpu_dm_update_connector_after_detect(aconnector);
2952
2953                 drm_modeset_lock_all(dev);
2954                 dm_restore_drm_connector_state(dev, connector);
2955                 drm_modeset_unlock_all(dev);
2956
2957                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2958                         drm_kms_helper_hotplug_event(dev);
2959         }
2960         mutex_unlock(&aconnector->hpd_lock);
2961
2962 }
2963
2964 static void handle_hpd_irq(void *param)
2965 {
2966         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2967
2968         handle_hpd_irq_helper(aconnector);
2969
2970 }
2971
2972 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
2973 {
2974         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2975         uint8_t dret;
2976         bool new_irq_handled = false;
2977         int dpcd_addr;
2978         int dpcd_bytes_to_read;
2979
2980         const int max_process_count = 30;
2981         int process_count = 0;
2982
2983         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2984
2985         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2986                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2987                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2988                 dpcd_addr = DP_SINK_COUNT;
2989         } else {
2990                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2991                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2992                 dpcd_addr = DP_SINK_COUNT_ESI;
2993         }
2994
2995         dret = drm_dp_dpcd_read(
2996                 &aconnector->dm_dp_aux.aux,
2997                 dpcd_addr,
2998                 esi,
2999                 dpcd_bytes_to_read);
3000
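        /*
         * Poll-and-ack loop: let the MST manager service the ESI bytes, ACK
         * the handled bits back to the sink, then re-read the ESI in case a
         * new IRQ arrived in the meantime; give up after max_process_count
         * iterations so a misbehaving sink cannot stall us here forever.
         */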
3001         while (dret == dpcd_bytes_to_read &&
3002                 process_count < max_process_count) {
3003                 uint8_t retry;
3004                 dret = 0;
3005
3006                 process_count++;
3007
3008                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3009                 /* handle HPD short pulse irq */
3010                 if (aconnector->mst_mgr.mst_state)
3011                         drm_dp_mst_hpd_irq(
3012                                 &aconnector->mst_mgr,
3013                                 esi,
3014                                 &new_irq_handled);
3015
3016                 if (new_irq_handled) {
3017                         /* ACK at DPCD to notify downstream */
3018                         const int ack_dpcd_bytes_to_write =
3019                                 dpcd_bytes_to_read - 1;
3020
3021                         for (retry = 0; retry < 3; retry++) {
3022                                 uint8_t wret;
3023
3024                                 wret = drm_dp_dpcd_write(
3025                                         &aconnector->dm_dp_aux.aux,
3026                                         dpcd_addr + 1,
3027                                         &esi[1],
3028                                         ack_dpcd_bytes_to_write);
3029                                 if (wret == ack_dpcd_bytes_to_write)
3030                                         break;
3031                         }
3032
3033                         /* check if there is a new irq to be handled */
3034                         dret = drm_dp_dpcd_read(
3035                                 &aconnector->dm_dp_aux.aux,
3036                                 dpcd_addr,
3037                                 esi,
3038                                 dpcd_bytes_to_read);
3039
3040                         new_irq_handled = false;
3041                 } else {
3042                         break;
3043                 }
3044         }
3045
3046         if (process_count == max_process_count)
3047                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3048 }
3049
3050 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3051                                                         union hpd_irq_data hpd_irq_data)
3052 {
3053         struct hpd_rx_irq_offload_work *offload_work =
3054                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3055
3056         if (!offload_work) {
3057                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3058                 return;
3059         }
3060
3061         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3062         offload_work->data = hpd_irq_data;
3063         offload_work->offload_wq = offload_wq;
3064
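        /*
         * Note: offload_work is presumably freed by the work handler
         * (dm_handle_hpd_rx_offload_work) once it has consumed the data;
         * it is not released here.
         */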
3065         queue_work(offload_wq->wq, &offload_work->work);
3066         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3067 }
3068
3069 static void handle_hpd_rx_irq(void *param)
3070 {
3071         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3072         struct drm_connector *connector = &aconnector->base;
3073         struct drm_device *dev = connector->dev;
3074         struct dc_link *dc_link = aconnector->dc_link;
3075         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3076         bool result = false;
3077         enum dc_connection_type new_connection_type = dc_connection_none;
3078         struct amdgpu_device *adev = drm_to_adev(dev);
3079         union hpd_irq_data hpd_irq_data;
3080         bool link_loss = false;
3081         bool has_left_work = false;
3082         int idx = aconnector->base.index;
3083         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3084
3085         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3086
3087         if (adev->dm.disable_hpd_irq)
3088                 return;
3089
3090         /*
3091          * TODO: Temporarily hold a mutex to keep the hpd interrupt from
3092          * hitting a gpio conflict; once the i2c helper is implemented,
3093          * this mutex should be retired.
3094          */
3095         mutex_lock(&aconnector->hpd_lock);
3096
3097         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3098                                                 &link_loss, true, &has_left_work);
3099
3100         if (!has_left_work)
3101                 goto out;
3102
3103         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3104                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3105                 goto out;
3106         }
3107
3108         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3109                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3110                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3111                         dm_handle_mst_sideband_msg(aconnector);
3112                         goto out;
3113                 }
3114
3115                 if (link_loss) {
3116                         bool skip = false;
3117
3118                         spin_lock(&offload_wq->offload_lock);
3119                         skip = offload_wq->is_handling_link_loss;
3120
3121                         if (!skip)
3122                                 offload_wq->is_handling_link_loss = true;
3123
3124                         spin_unlock(&offload_wq->offload_lock);
3125
3126                         if (!skip)
3127                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3128
3129                         goto out;
3130                 }
3131         }
3132
3133 out:
3134         if (result && !is_mst_root_connector) {
3135                 /* Downstream Port status changed. */
3136                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3137                         DRM_ERROR("KMS: Failed to detect connector\n");
3138
3139                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3140                         emulated_link_detect(dc_link);
3141
3142                         if (aconnector->fake_enable)
3143                                 aconnector->fake_enable = false;
3144
3145                         amdgpu_dm_update_connector_after_detect(aconnector);
3146
3147
3148                         drm_modeset_lock_all(dev);
3149                         dm_restore_drm_connector_state(dev, connector);
3150                         drm_modeset_unlock_all(dev);
3151
3152                         drm_kms_helper_hotplug_event(dev);
3153                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3154
3155                         if (aconnector->fake_enable)
3156                                 aconnector->fake_enable = false;
3157
3158                         amdgpu_dm_update_connector_after_detect(aconnector);
3159
3160
3161                         drm_modeset_lock_all(dev);
3162                         dm_restore_drm_connector_state(dev, connector);
3163                         drm_modeset_unlock_all(dev);
3164
3165                         drm_kms_helper_hotplug_event(dev);
3166                 }
3167         }
3168 #ifdef CONFIG_DRM_AMD_DC_HDCP
3169         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3170                 if (adev->dm.hdcp_workqueue)
3171                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3172         }
3173 #endif
3174
3175         if (dc_link->type != dc_connection_mst_branch)
3176                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3177
3178         mutex_unlock(&aconnector->hpd_lock);
3179 }
3180
3181 static void register_hpd_handlers(struct amdgpu_device *adev)
3182 {
3183         struct drm_device *dev = adev_to_drm(adev);
3184         struct drm_connector *connector;
3185         struct amdgpu_dm_connector *aconnector;
3186         const struct dc_link *dc_link;
3187         struct dc_interrupt_params int_params = {0};
3188
3189         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3190         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3191
3192         list_for_each_entry(connector,
3193                         &dev->mode_config.connector_list, head) {
3194
3195                 aconnector = to_amdgpu_dm_connector(connector);
3196                 dc_link = aconnector->dc_link;
3197
3198                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3199                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3200                         int_params.irq_source = dc_link->irq_source_hpd;
3201
3202                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3203                                         handle_hpd_irq,
3204                                         (void *) aconnector);
3205                 }
3206
3207                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3208
3209                         /* Also register for DP short pulse (hpd_rx). */
3210                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3211                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3212
3213                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3214                                         handle_hpd_rx_irq,
3215                                         (void *) aconnector);
3216
3217                         if (adev->dm.hpd_rx_offload_wq)
3218                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3219                                         aconnector;
3220                 }
3221         }
3222 }
3223
3224 #if defined(CONFIG_DRM_AMD_DC_SI)
3225 /* Register IRQ sources and initialize IRQ callbacks */
3226 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3227 {
3228         struct dc *dc = adev->dm.dc;
3229         struct common_irq_params *c_irq_params;
3230         struct dc_interrupt_params int_params = {0};
3231         int r;
3232         int i;
3233         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3234
3235         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3236         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3237
3238         /*
3239          * Actions of amdgpu_irq_add_id():
3240          * 1. Register a set() function with the base driver.
3241          *    The base driver will call set() to enable/disable an
3242          *    interrupt in DC hardware.
3243          * 2. Register amdgpu_dm_irq_handler().
3244          *    The base driver will call amdgpu_dm_irq_handler() for ALL
3245          *    interrupts coming from DC hardware; amdgpu_dm_irq_handler()
3246          *    redirects the interrupt to DC for acknowledging and handling.
3247          */
3248
3249         /* Use VBLANK interrupt */
3250         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3251                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3252                 if (r) {
3253                         DRM_ERROR("Failed to add crtc irq id!\n");
3254                         return r;
3255                 }
3256
3257                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3258                 int_params.irq_source =
3259                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3260
3261                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3262
3263                 c_irq_params->adev = adev;
3264                 c_irq_params->irq_src = int_params.irq_source;
3265
3266                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3267                                 dm_crtc_high_irq, c_irq_params);
3268         }
3269
3270         /* Use GRPH_PFLIP interrupt */
3271         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3272                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3273                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3274                 if (r) {
3275                         DRM_ERROR("Failed to add page flip irq id!\n");
3276                         return r;
3277                 }
3278
3279                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3280                 int_params.irq_source =
3281                         dc_interrupt_to_irq_source(dc, i, 0);
3282
3283                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3284
3285                 c_irq_params->adev = adev;
3286                 c_irq_params->irq_src = int_params.irq_source;
3287
3288                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3289                                 dm_pflip_high_irq, c_irq_params);
3290
3291         }
3292
3293         /* HPD */
3294         r = amdgpu_irq_add_id(adev, client_id,
3295                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3296         if (r) {
3297                 DRM_ERROR("Failed to add hpd irq id!\n");
3298                 return r;
3299         }
3300
3301         register_hpd_handlers(adev);
3302
3303         return 0;
3304 }
3305 #endif
3306
3307 /* Register IRQ sources and initialize IRQ callbacks */
3308 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3309 {
3310         struct dc *dc = adev->dm.dc;
3311         struct common_irq_params *c_irq_params;
3312         struct dc_interrupt_params int_params = {0};
3313         int r;
3314         int i;
3315         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3316
3317         if (adev->family >= AMDGPU_FAMILY_AI)
3318                 client_id = SOC15_IH_CLIENTID_DCE;
3319
3320         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3321         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3322
3323         /*
3324          * Actions of amdgpu_irq_add_id():
3325          * 1. Register a set() function with the base driver.
3326          *    The base driver will call set() to enable/disable an
3327          *    interrupt in DC hardware.
3328          * 2. Register amdgpu_dm_irq_handler().
3329          *    The base driver will call amdgpu_dm_irq_handler() for ALL
3330          *    interrupts coming from DC hardware; amdgpu_dm_irq_handler()
3331          *    redirects the interrupt to DC for acknowledging and handling.
3332          */
3333
3334         /* Use VBLANK interrupt */
3335         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3336                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3337                 if (r) {
3338                         DRM_ERROR("Failed to add crtc irq id!\n");
3339                         return r;
3340                 }
3341
3342                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3343                 int_params.irq_source =
3344                         dc_interrupt_to_irq_source(dc, i, 0);
3345
3346                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3347
3348                 c_irq_params->adev = adev;
3349                 c_irq_params->irq_src = int_params.irq_source;
3350
3351                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352                                 dm_crtc_high_irq, c_irq_params);
3353         }
3354
3355         /* Use VUPDATE interrupt */
3356         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3357                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3358                 if (r) {
3359                         DRM_ERROR("Failed to add vupdate irq id!\n");
3360                         return r;
3361                 }
3362
3363                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364                 int_params.irq_source =
3365                         dc_interrupt_to_irq_source(dc, i, 0);
3366
3367                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3368
3369                 c_irq_params->adev = adev;
3370                 c_irq_params->irq_src = int_params.irq_source;
3371
3372                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373                                 dm_vupdate_high_irq, c_irq_params);
3374         }
3375
3376         /* Use GRPH_PFLIP interrupt */
3377         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3378                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3379                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3380                 if (r) {
3381                         DRM_ERROR("Failed to add page flip irq id!\n");
3382                         return r;
3383                 }
3384
3385                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3386                 int_params.irq_source =
3387                         dc_interrupt_to_irq_source(dc, i, 0);
3388
3389                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3390
3391                 c_irq_params->adev = adev;
3392                 c_irq_params->irq_src = int_params.irq_source;
3393
3394                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3395                                 dm_pflip_high_irq, c_irq_params);
3396
3397         }
3398
3399         /* HPD */
3400         r = amdgpu_irq_add_id(adev, client_id,
3401                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3402         if (r) {
3403                 DRM_ERROR("Failed to add hpd irq id!\n");
3404                 return r;
3405         }
3406
3407         register_hpd_handlers(adev);
3408
3409         return 0;
3410 }
3411
3412 #if defined(CONFIG_DRM_AMD_DC_DCN)
3413 /* Register IRQ sources and initialize IRQ callbacks */
3414 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3415 {
3416         struct dc *dc = adev->dm.dc;
3417         struct common_irq_params *c_irq_params;
3418         struct dc_interrupt_params int_params = {0};
3419         int r;
3420         int i;
3421 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3422         static const unsigned int vrtl_int_srcid[] = {
3423                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3424                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3425                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3426                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3427                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3428                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3429         };
3430 #endif
3431
3432         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3433         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3434
3435         /*
3436          * Actions of amdgpu_irq_add_id():
3437          * 1. Register a set() function with base driver.
3438          *    Base driver will call set() function to enable/disable an
3439          *    interrupt in DC hardware.
3440          * 2. Register amdgpu_dm_irq_handler().
3441          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3442          *    coming from DC hardware.
3443          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3444          *    for acknowledging and handling.
3445          */
3446
3447         /* Use VSTARTUP interrupt */
3448         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3449                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3450                         i++) {
3451                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3452
3453                 if (r) {
3454                         DRM_ERROR("Failed to add crtc irq id!\n");
3455                         return r;
3456                 }
3457
3458                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3459                 int_params.irq_source =
3460                         dc_interrupt_to_irq_source(dc, i, 0);
3461
3462                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3463
3464                 c_irq_params->adev = adev;
3465                 c_irq_params->irq_src = int_params.irq_source;
3466
3467                 amdgpu_dm_irq_register_interrupt(
3468                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3469         }
3470
3471         /* Use otg vertical line interrupt */
3472 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3473         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3474                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3475                                 vrtl_int_srcid[i], &adev->vline0_irq);
3476
3477                 if (r) {
3478                         DRM_ERROR("Failed to add vline0 irq id!\n");
3479                         return r;
3480                 }
3481
3482                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3483                 int_params.irq_source =
3484                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3485
3486                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3487                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3488                         break;
3489                 }
3490
3491                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3492                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3493
3494                 c_irq_params->adev = adev;
3495                 c_irq_params->irq_src = int_params.irq_source;
3496
3497                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3498                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3499         }
3500 #endif
3501
3502         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3503          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3504          * to trigger at end of each vblank, regardless of state of the lock,
3505          * matching DCE behaviour.
3506          */
3507         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3508              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3509              i++) {
3510                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3511
3512                 if (r) {
3513                         DRM_ERROR("Failed to add vupdate irq id!\n");
3514                         return r;
3515                 }
3516
3517                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3518                 int_params.irq_source =
3519                         dc_interrupt_to_irq_source(dc, i, 0);
3520
3521                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3522
3523                 c_irq_params->adev = adev;
3524                 c_irq_params->irq_src = int_params.irq_source;
3525
3526                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3527                                 dm_vupdate_high_irq, c_irq_params);
3528         }
3529
3530         /* Use GRPH_PFLIP interrupt */
3531         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3532                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3533                         i++) {
3534                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3535                 if (r) {
3536                         DRM_ERROR("Failed to add page flip irq id!\n");
3537                         return r;
3538                 }
3539
3540                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3541                 int_params.irq_source =
3542                         dc_interrupt_to_irq_source(dc, i, 0);
3543
3544                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3545
3546                 c_irq_params->adev = adev;
3547                 c_irq_params->irq_src = int_params.irq_source;
3548
3549                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3550                                 dm_pflip_high_irq, c_irq_params);
3551
3552         }
3553
3554         /* HPD */
3555         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3556                         &adev->hpd_irq);
3557         if (r) {
3558                 DRM_ERROR("Failed to add hpd irq id!\n");
3559                 return r;
3560         }
3561
3562         register_hpd_handlers(adev);
3563
3564         return 0;
3565 }
3566 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3567 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3568 {
3569         struct dc *dc = adev->dm.dc;
3570         struct common_irq_params *c_irq_params;
3571         struct dc_interrupt_params int_params = {0};
3572         int r, i;
3573
3574         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3575         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3576
3577         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3578                         &adev->dmub_outbox_irq);
3579         if (r) {
3580                 DRM_ERROR("Failed to add outbox irq id!\n");
3581                 return r;
3582         }
3583
3584         if (dc->ctx->dmub_srv) {
3585                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3586                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3587                 int_params.irq_source =
3588                 dc_interrupt_to_irq_source(dc, i, 0);
3589
3590                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3591
3592                 c_irq_params->adev = adev;
3593                 c_irq_params->irq_src = int_params.irq_source;
3594
3595                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3596                                 dm_dmub_outbox1_low_irq, c_irq_params);
3597         }
3598
3599         return 0;
3600 }
3601 #endif
3602
3603 /*
3604  * Acquires the lock for the atomic state object and returns
3605  * the new atomic state.
3606  *
3607  * This should only be called during atomic check.
3608  */
3609 static int dm_atomic_get_state(struct drm_atomic_state *state,
3610                                struct dm_atomic_state **dm_state)
3611 {
3612         struct drm_device *dev = state->dev;
3613         struct amdgpu_device *adev = drm_to_adev(dev);
3614         struct amdgpu_display_manager *dm = &adev->dm;
3615         struct drm_private_state *priv_state;
3616
3617         if (*dm_state)
3618                 return 0;
3619
3620         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3621         if (IS_ERR(priv_state))
3622                 return PTR_ERR(priv_state);
3623
3624         *dm_state = to_dm_atomic_state(priv_state);
3625
3626         return 0;
3627 }
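/*
 * Typical use during atomic check (sketch):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *
 *      if (ret)
 *              return ret;
 *      // dm_state->context can now be safely inspected or modified
 */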
3628
3629 static struct dm_atomic_state *
3630 dm_atomic_get_new_state(struct drm_atomic_state *state)
3631 {
3632         struct drm_device *dev = state->dev;
3633         struct amdgpu_device *adev = drm_to_adev(dev);
3634         struct amdgpu_display_manager *dm = &adev->dm;
3635         struct drm_private_obj *obj;
3636         struct drm_private_state *new_obj_state;
3637         int i;
3638
3639         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3640                 if (obj->funcs == dm->atomic_obj.funcs)
3641                         return to_dm_atomic_state(new_obj_state);
3642         }
3643
3644         return NULL;
3645 }
3646
3647 static struct drm_private_state *
3648 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3649 {
3650         struct dm_atomic_state *old_state, *new_state;
3651
3652         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3653         if (!new_state)
3654                 return NULL;
3655
3656         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3657
3658         old_state = to_dm_atomic_state(obj->state);
3659
3660         if (old_state && old_state->context)
3661                 new_state->context = dc_copy_state(old_state->context);
3662
3663         if (!new_state->context) {
3664                 kfree(new_state);
3665                 return NULL;
3666         }
3667
3668         return &new_state->base;
3669 }
3670
3671 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3672                                     struct drm_private_state *state)
3673 {
3674         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3675
3676         if (dm_state && dm_state->context)
3677                 dc_release_state(dm_state->context);
3678
3679         kfree(dm_state);
3680 }
3681
3682 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3683         .atomic_duplicate_state = dm_atomic_duplicate_state,
3684         .atomic_destroy_state = dm_atomic_destroy_state,
3685 };
3686
3687 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3688 {
3689         struct dm_atomic_state *state;
3690         int r;
3691
3692         adev->mode_info.mode_config_initialized = true;
3693
3694         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3695         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3696
3697         adev_to_drm(adev)->mode_config.max_width = 16384;
3698         adev_to_drm(adev)->mode_config.max_height = 16384;
3699
3700         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3701         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3702         /* indicates support for immediate flip */
3703         adev_to_drm(adev)->mode_config.async_page_flip = true;
3704
3705         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3706
3707         state = kzalloc(sizeof(*state), GFP_KERNEL);
3708         if (!state)
3709                 return -ENOMEM;
3710
3711         state->context = dc_create_state(adev->dm.dc);
3712         if (!state->context) {
3713                 kfree(state);
3714                 return -ENOMEM;
3715         }
3716
3717         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3718
3719         drm_atomic_private_obj_init(adev_to_drm(adev),
3720                                     &adev->dm.atomic_obj,
3721                                     &state->base,
3722                                     &dm_atomic_state_funcs);
3723
3724         r = amdgpu_display_modeset_create_props(adev);
3725         if (r) {
3726                 dc_release_state(state->context);
3727                 kfree(state);
3728                 return r;
3729         }
3730
3731         r = amdgpu_dm_audio_init(adev);
3732         if (r) {
3733                 dc_release_state(state->context);
3734                 kfree(state);
3735                 return r;
3736         }
3737
3738         return 0;
3739 }
3740
3741 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3742 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3743 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3744
3745 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3746         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3747
3748 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3749                                             int bl_idx)
3750 {
3751 #if defined(CONFIG_ACPI)
3752         struct amdgpu_dm_backlight_caps caps;
3753
3754         memset(&caps, 0, sizeof(caps));
3755
3756         if (dm->backlight_caps[bl_idx].caps_valid)
3757                 return;
3758
3759         amdgpu_acpi_get_backlight_caps(&caps);
3760         if (caps.caps_valid) {
3761                 dm->backlight_caps[bl_idx].caps_valid = true;
3762                 if (caps.aux_support)
3763                         return;
3764                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3765                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3766         } else {
3767                 dm->backlight_caps[bl_idx].min_input_signal =
3768                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3769                 dm->backlight_caps[bl_idx].max_input_signal =
3770                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3771         }
3772 #else
3773         if (dm->backlight_caps[bl_idx].aux_support)
3774                 return;
3775
3776         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3777         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3778 #endif
3779 }
3780
3781 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3782                                 unsigned *min, unsigned *max)
3783 {
3784         if (!caps)
3785                 return 0;
3786
3787         if (caps->aux_support) {
3788                 // Firmware limits are in nits, DC API wants millinits.
3789                 *max = 1000 * caps->aux_max_input_signal;
3790                 *min = 1000 * caps->aux_min_input_signal;
3791         } else {
3792                 // Firmware limits are 8-bit, PWM control is 16-bit.
3793                 *max = 0x101 * caps->max_input_signal;
3794                 *min = 0x101 * caps->min_input_signal;
3795         }
3796         return 1;
3797 }
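/*
 * For example, in the PWM case a firmware max_input_signal of 255 becomes
 * 0x101 * 255 = 0xFFFF: multiplying by 0x101 replicates the 8-bit limit
 * into both bytes of the 16-bit PWM range.
 */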
3798
3799 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3800                                         uint32_t brightness)
3801 {
3802         unsigned min, max;
3803
3804         if (!get_brightness_range(caps, &min, &max))
3805                 return brightness;
3806
3807         // Rescale 0..255 to min..max
3808         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3809                                        AMDGPU_MAX_BL_LEVEL);
3810 }
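/*
 * The rescale is linear with pinned endpoints: a user brightness of 0 maps
 * to min, and AMDGPU_MAX_BL_LEVEL (255) maps exactly to max.
 */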
3811
3812 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3813                                       uint32_t brightness)
3814 {
3815         unsigned min, max;
3816
3817         if (!get_brightness_range(caps, &min, &max))
3818                 return brightness;
3819
3820         if (brightness < min)
3821                 return 0;
3822         // Rescale min..max to 0..255
3823         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3824                                  max - min);
3825 }
3826
3827 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3828                                          int bl_idx,
3829                                          u32 user_brightness)
3830 {
3831         struct amdgpu_dm_backlight_caps caps;
3832         struct dc_link *link;
3833         u32 brightness;
3834         bool rc;
3835
3836         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3837         caps = dm->backlight_caps[bl_idx];
3838
3839         dm->brightness[bl_idx] = user_brightness;
3840         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3841         link = (struct dc_link *)dm->backlight_link[bl_idx];
3842
3843         /* Change brightness based on AUX property */
3844         if (caps.aux_support) {
3845                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3846                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3847                 if (!rc)
3848                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3849         } else {
3850                 rc = dc_link_set_backlight_level(link, brightness, 0);
3851                 if (!rc)
3852                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3853         }
3854
3855         return rc ? 0 : 1;
3856 }
3857
3858 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3859 {
3860         struct amdgpu_display_manager *dm = bl_get_data(bd);
3861         int i;
3862
3863         for (i = 0; i < dm->num_of_edps; i++) {
3864                 if (bd == dm->backlight_dev[i])
3865                         break;
3866         }
3867         if (i >= dm->num_of_edps)
3868                 i = 0;
3869         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3870
3871         return 0;
3872 }
3873
3874 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3875                                          int bl_idx)
3876 {
3877         struct amdgpu_dm_backlight_caps caps;
3878         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3879
3880         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3881         caps = dm->backlight_caps[bl_idx];
3882
3883         if (caps.aux_support) {
3884                 u32 avg, peak;
3885                 bool rc;
3886
3887                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3888                 if (!rc)
3889                         return dm->brightness[bl_idx];
3890                 return convert_brightness_to_user(&caps, avg);
3891         } else {
3892                 int ret = dc_link_get_backlight_level(link);
3893
3894                 if (ret == DC_ERROR_UNEXPECTED)
3895                         return dm->brightness[bl_idx];
3896                 return convert_brightness_to_user(&caps, ret);
3897         }
3898 }
3899
3900 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3901 {
3902         struct amdgpu_display_manager *dm = bl_get_data(bd);
3903         int i;
3904
3905         for (i = 0; i < dm->num_of_edps; i++) {
3906                 if (bd == dm->backlight_dev[i])
3907                         break;
3908         }
3909         if (i >= dm->num_of_edps)
3910                 i = 0;
3911         return amdgpu_dm_backlight_get_level(dm, i);
3912 }
3913
3914 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3915         .options = BL_CORE_SUSPENDRESUME,
3916         .get_brightness = amdgpu_dm_backlight_get_brightness,
3917         .update_status  = amdgpu_dm_backlight_update_status,
3918 };
3919
3920 static void
3921 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3922 {
3923         char bl_name[16];
3924         struct backlight_properties props = { 0 };
3925
3926         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3927         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3928
3929         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3930         props.brightness = AMDGPU_MAX_BL_LEVEL;
3931         props.type = BACKLIGHT_RAW;
3932
3933         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3934                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3935
3936         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3937                                                                        adev_to_drm(dm->adev)->dev,
3938                                                                        dm,
3939                                                                        &amdgpu_dm_backlight_ops,
3940                                                                        &props);
3941
3942         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3943                 DRM_ERROR("DM: Backlight registration failed!\n");
3944         else
3945                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3946 }
3947 #endif
3948
3949 static int initialize_plane(struct amdgpu_display_manager *dm,
3950                             struct amdgpu_mode_info *mode_info, int plane_id,
3951                             enum drm_plane_type plane_type,
3952                             const struct dc_plane_cap *plane_cap)
3953 {
3954         struct drm_plane *plane;
3955         unsigned long possible_crtcs;
3956         int ret = 0;
3957
3958         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3959         if (!plane) {
3960                 DRM_ERROR("KMS: Failed to allocate plane\n");
3961                 return -ENOMEM;
3962         }
3963         plane->type = plane_type;
3964
3965         /*
3966          * HACK: IGT tests expect that the primary plane for a CRTC
3967          * can only have one possible CRTC. Only expose support for
3968          * any CRTC on planes that will not be used as a primary
3969          * plane for a CRTC - i.e. overlay or underlay planes.
3970          */
3971         possible_crtcs = 1 << plane_id;
3972         if (plane_id >= dm->dc->caps.max_streams)
3973                 possible_crtcs = 0xff;
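                /*
                 * For example, with 4 streams: plane_id 2 (a primary) gets
                 * possible_crtcs = 0x4, binding it to CRTC 2 only, while
                 * plane_id 5 (an overlay) gets 0xff and may go on any CRTC.
                 */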
3974
3975         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3976
3977         if (ret) {
3978                 DRM_ERROR("KMS: Failed to initialize plane\n");
3979                 kfree(plane);
3980                 return ret;
3981         }
3982
3983         if (mode_info)
3984                 mode_info->planes[plane_id] = plane;
3985
3986         return ret;
3987 }
3988
3989
3990 static void register_backlight_device(struct amdgpu_display_manager *dm,
3991                                       struct dc_link *link)
3992 {
3993 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3994         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3995
3996         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3997             link->type != dc_connection_none) {
3998                 /*
3999                  * Even if registration fails, we should continue with
4000                  * DM initialization, because having no backlight control
4001                  * is better than a black screen.
4002                  */
4003                 if (!dm->backlight_dev[dm->num_of_edps])
4004                         amdgpu_dm_register_backlight_device(dm);
4005
4006                 if (dm->backlight_dev[dm->num_of_edps]) {
4007                         dm->backlight_link[dm->num_of_edps] = link;
4008                         dm->num_of_edps++;
4009                 }
4010         }
4011 #endif
4012 }
4013
4014
4015 /*
4016  * In this architecture, the association
4017  * connector -> encoder -> crtc
4018  * is not really required. The crtc and connector will hold the
4019  * display_index as an abstraction to use with the DAL component.
4020  *
4021  * Returns 0 on success
4022  */
4023 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4024 {
4025         struct amdgpu_display_manager *dm = &adev->dm;
4026         int32_t i;
4027         struct amdgpu_dm_connector *aconnector = NULL;
4028         struct amdgpu_encoder *aencoder = NULL;
4029         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4030         uint32_t link_cnt;
4031         int32_t primary_planes;
4032         enum dc_connection_type new_connection_type = dc_connection_none;
4033         const struct dc_plane_cap *plane;
4034
4035         dm->display_indexes_num = dm->dc->caps.max_streams;
4036         /* Update the actual number of CRTCs in use */
4037         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4038
4039         link_cnt = dm->dc->caps.max_links;
4040         if (amdgpu_dm_mode_config_init(dm->adev)) {
4041                 DRM_ERROR("DM: Failed to initialize mode config\n");
4042                 return -EINVAL;
4043         }
4044
4045         /* There is one primary plane per CRTC */
4046         primary_planes = dm->dc->caps.max_streams;
4047         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4048
4049         /*
4050          * Initialize primary planes, implicit planes for legacy IOCTLs.
4051          * Order is reversed to match iteration order in atomic check.
4052          */
4053         for (i = (primary_planes - 1); i >= 0; i--) {
4054                 plane = &dm->dc->caps.planes[i];
4055
4056                 if (initialize_plane(dm, mode_info, i,
4057                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4058                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4059                         goto fail;
4060                 }
4061         }
4062
4063         /*
4064          * Initialize overlay planes, index starting after primary planes.
4065          * These planes have a higher DRM index than the primary planes since
4066          * they should be considered as having a higher z-order.
4067          * Order is reversed to match iteration order in atomic check.
4068          *
4069          * Only support DCN for now, and only expose one so we don't encourage
4070          * userspace to use up all the pipes.
4071          */
4072         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4073                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4074
4075                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4076                         continue;
4077
4078                 if (!plane->blends_with_above || !plane->blends_with_below)
4079                         continue;
4080
4081                 if (!plane->pixel_format_support.argb8888)
4082                         continue;
4083
4084                 if (initialize_plane(dm, NULL, primary_planes + i,
4085                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4086                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4087                         goto fail;
4088                 }
4089
4090                 /* Only create one overlay plane. */
4091                 break;
4092         }
4093
4094         for (i = 0; i < dm->dc->caps.max_streams; i++)
4095                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4096                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4097                         goto fail;
4098                 }
4099
4100 #if defined(CONFIG_DRM_AMD_DC_DCN)
4101         /* Use Outbox interrupt */
4102         switch (adev->ip_versions[DCE_HWIP][0]) {
4103         case IP_VERSION(3, 0, 0):
4104         case IP_VERSION(3, 1, 2):
4105         case IP_VERSION(3, 1, 3):
4106         case IP_VERSION(2, 1, 0):
4107                 if (register_outbox_irq_handlers(dm->adev)) {
4108                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4109                         goto fail;
4110                 }
4111                 break;
4112         default:
4113                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4114                               adev->ip_versions[DCE_HWIP][0]);
4115         }
4116 #endif
4117
4118         /* loops over all connectors on the board */
4119         for (i = 0; i < link_cnt; i++) {
4120                 struct dc_link *link = NULL;
4121
4122                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4123                         DRM_ERROR(
4124                                 "KMS: Cannot support more than %d display indexes\n",
4125                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4126                         continue;
4127                 }
4128
4129                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4130                 if (!aconnector)
4131                         goto fail;
4132
4133                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4134                 if (!aencoder)
4135                         goto fail;
4136
4137                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4138                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4139                         goto fail;
4140                 }
4141
4142                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4143                         DRM_ERROR("KMS: Failed to initialize connector\n");
4144                         goto fail;
4145                 }
4146
4147                 link = dc_get_link_at_index(dm->dc, i);
4148
4149                 if (!dc_link_detect_sink(link, &new_connection_type))
4150                         DRM_ERROR("KMS: Failed to detect connector\n");
4151
4152                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4153                         emulated_link_detect(link);
4154                         amdgpu_dm_update_connector_after_detect(aconnector);
4156                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4157                         amdgpu_dm_update_connector_after_detect(aconnector);
4158                         register_backlight_device(dm, link);
4159                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4160                                 amdgpu_dm_set_psr_caps(link);
4161                 }
4162
4164         }
4165
4166         /* Software is initialized. Now we can register interrupt handlers. */
4167         switch (adev->asic_type) {
4168 #if defined(CONFIG_DRM_AMD_DC_SI)
4169         case CHIP_TAHITI:
4170         case CHIP_PITCAIRN:
4171         case CHIP_VERDE:
4172         case CHIP_OLAND:
4173                 if (dce60_register_irq_handlers(dm->adev)) {
4174                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4175                         goto fail;
4176                 }
4177                 break;
4178 #endif
4179         case CHIP_BONAIRE:
4180         case CHIP_HAWAII:
4181         case CHIP_KAVERI:
4182         case CHIP_KABINI:
4183         case CHIP_MULLINS:
4184         case CHIP_TONGA:
4185         case CHIP_FIJI:
4186         case CHIP_CARRIZO:
4187         case CHIP_STONEY:
4188         case CHIP_POLARIS11:
4189         case CHIP_POLARIS10:
4190         case CHIP_POLARIS12:
4191         case CHIP_VEGAM:
4192         case CHIP_VEGA10:
4193         case CHIP_VEGA12:
4194         case CHIP_VEGA20:
4195                 if (dce110_register_irq_handlers(dm->adev)) {
4196                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4197                         goto fail;
4198                 }
4199                 break;
4200         default:
4201 #if defined(CONFIG_DRM_AMD_DC_DCN)
4202                 switch (adev->ip_versions[DCE_HWIP][0]) {
4203                 case IP_VERSION(1, 0, 0):
4204                 case IP_VERSION(1, 0, 1):
4205                 case IP_VERSION(2, 0, 2):
4206                 case IP_VERSION(2, 0, 3):
4207                 case IP_VERSION(2, 0, 0):
4208                 case IP_VERSION(2, 1, 0):
4209                 case IP_VERSION(3, 0, 0):
4210                 case IP_VERSION(3, 0, 2):
4211                 case IP_VERSION(3, 0, 3):
4212                 case IP_VERSION(3, 0, 1):
4213                 case IP_VERSION(3, 1, 2):
4214                 case IP_VERSION(3, 1, 3):
4215                         if (dcn10_register_irq_handlers(dm->adev)) {
4216                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4217                                 goto fail;
4218                         }
4219                         break;
4220                 default:
4221                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4222                                         adev->ip_versions[DCE_HWIP][0]);
4223                         goto fail;
4224                 }
4225 #endif
4226                 break;
4227         }
4228
4229         return 0;
4230 fail:
4231         kfree(aencoder);
4232         kfree(aconnector);
4233
4234         return -EINVAL;
4235 }
4236
4237 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4238 {
4239         drm_atomic_private_obj_fini(&dm->atomic_obj);
4241 }
4242
4243 /******************************************************************************
4244  * amdgpu_display_funcs functions
4245  *****************************************************************************/
4246
4247 /*
4248  * dm_bandwidth_update - program display watermarks
4249  *
4250  * @adev: amdgpu_device pointer
4251  *
4252  * Calculate and program the display watermarks and line buffer allocation.
4253  */
4254 static void dm_bandwidth_update(struct amdgpu_device *adev)
4255 {
4256         /* TODO: implement later */
4257 }
4258
4259 static const struct amdgpu_display_funcs dm_display_funcs = {
4260         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4261         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4262         .backlight_set_level = NULL, /* never called for DC */
4263         .backlight_get_level = NULL, /* never called for DC */
4264         .hpd_sense = NULL, /* called unconditionally */
4265         .hpd_set_polarity = NULL, /* called unconditionally */
4266         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4267         .page_flip_get_scanoutpos =
4268                 dm_crtc_get_scanoutpos, /* called unconditionally */
4269         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4270         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4271 };
4272
4273 #if defined(CONFIG_DEBUG_KERNEL_DC)
4274
4275 static ssize_t s3_debug_store(struct device *device,
4276                               struct device_attribute *attr,
4277                               const char *buf,
4278                               size_t count)
4279 {
4280         int ret;
4281         int s3_state;
4282         struct drm_device *drm_dev = dev_get_drvdata(device);
4283         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4284
4285         ret = kstrtoint(buf, 0, &s3_state);
4286
4287         if (ret == 0) {
4288                 if (s3_state) {
4289                         dm_resume(adev);
4290                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4291                 } else
4292                         dm_suspend(adev);
4293         }
4294
4295         return ret == 0 ? count : ret;
4296 }
4297
4298 DEVICE_ATTR_WO(s3_debug);
4299
4300 #endif
4301
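/*
 * dm_early_init - size the display hardware for the detected ASIC
 *
 * @handle: amdgpu_device pointer (passed as void *)
 *
 * Set the number of CRTCs, HPD pins and digital encoders based on the
 * ASIC type or DCE/DCN IP version, then hook up the DM display and IRQ
 * function tables.
 */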
4302 static int dm_early_init(void *handle)
4303 {
4304         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4305
4306         switch (adev->asic_type) {
4307 #if defined(CONFIG_DRM_AMD_DC_SI)
4308         case CHIP_TAHITI:
4309         case CHIP_PITCAIRN:
4310         case CHIP_VERDE:
4311                 adev->mode_info.num_crtc = 6;
4312                 adev->mode_info.num_hpd = 6;
4313                 adev->mode_info.num_dig = 6;
4314                 break;
4315         case CHIP_OLAND:
4316                 adev->mode_info.num_crtc = 2;
4317                 adev->mode_info.num_hpd = 2;
4318                 adev->mode_info.num_dig = 2;
4319                 break;
4320 #endif
4321         case CHIP_BONAIRE:
4322         case CHIP_HAWAII:
4323                 adev->mode_info.num_crtc = 6;
4324                 adev->mode_info.num_hpd = 6;
4325                 adev->mode_info.num_dig = 6;
4326                 break;
4327         case CHIP_KAVERI:
4328                 adev->mode_info.num_crtc = 4;
4329                 adev->mode_info.num_hpd = 6;
4330                 adev->mode_info.num_dig = 7;
4331                 break;
4332         case CHIP_KABINI:
4333         case CHIP_MULLINS:
4334                 adev->mode_info.num_crtc = 2;
4335                 adev->mode_info.num_hpd = 6;
4336                 adev->mode_info.num_dig = 6;
4337                 break;
4338         case CHIP_FIJI:
4339         case CHIP_TONGA:
4340                 adev->mode_info.num_crtc = 6;
4341                 adev->mode_info.num_hpd = 6;
4342                 adev->mode_info.num_dig = 7;
4343                 break;
4344         case CHIP_CARRIZO:
4345                 adev->mode_info.num_crtc = 3;
4346                 adev->mode_info.num_hpd = 6;
4347                 adev->mode_info.num_dig = 9;
4348                 break;
4349         case CHIP_STONEY:
4350                 adev->mode_info.num_crtc = 2;
4351                 adev->mode_info.num_hpd = 6;
4352                 adev->mode_info.num_dig = 9;
4353                 break;
4354         case CHIP_POLARIS11:
4355         case CHIP_POLARIS12:
4356                 adev->mode_info.num_crtc = 5;
4357                 adev->mode_info.num_hpd = 5;
4358                 adev->mode_info.num_dig = 5;
4359                 break;
4360         case CHIP_POLARIS10:
4361         case CHIP_VEGAM:
4362                 adev->mode_info.num_crtc = 6;
4363                 adev->mode_info.num_hpd = 6;
4364                 adev->mode_info.num_dig = 6;
4365                 break;
4366         case CHIP_VEGA10:
4367         case CHIP_VEGA12:
4368         case CHIP_VEGA20:
4369                 adev->mode_info.num_crtc = 6;
4370                 adev->mode_info.num_hpd = 6;
4371                 adev->mode_info.num_dig = 6;
4372                 break;
4373         default:
4374 #if defined(CONFIG_DRM_AMD_DC_DCN)
4375                 switch (adev->ip_versions[DCE_HWIP][0]) {
4376                 case IP_VERSION(2, 0, 2):
4377                 case IP_VERSION(3, 0, 0):
4378                         adev->mode_info.num_crtc = 6;
4379                         adev->mode_info.num_hpd = 6;
4380                         adev->mode_info.num_dig = 6;
4381                         break;
4382                 case IP_VERSION(2, 0, 0):
4383                 case IP_VERSION(3, 0, 2):
4384                         adev->mode_info.num_crtc = 5;
4385                         adev->mode_info.num_hpd = 5;
4386                         adev->mode_info.num_dig = 5;
4387                         break;
4388                 case IP_VERSION(2, 0, 3):
4389                 case IP_VERSION(3, 0, 3):
4390                         adev->mode_info.num_crtc = 2;
4391                         adev->mode_info.num_hpd = 2;
4392                         adev->mode_info.num_dig = 2;
4393                         break;
4394                 case IP_VERSION(1, 0, 0):
4395                 case IP_VERSION(1, 0, 1):
4396                 case IP_VERSION(3, 0, 1):
4397                 case IP_VERSION(2, 1, 0):
4398                 case IP_VERSION(3, 1, 2):
4399                 case IP_VERSION(3, 1, 3):
4400                         adev->mode_info.num_crtc = 4;
4401                         adev->mode_info.num_hpd = 4;
4402                         adev->mode_info.num_dig = 4;
4403                         break;
4404                 default:
4405                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4406                                         adev->ip_versions[DCE_HWIP][0]);
4407                         return -EINVAL;
4408                 }
4409 #endif
4410                 break;
4411         }
4412
4413         amdgpu_dm_set_irq_funcs(adev);
4414
4415         if (adev->mode_info.funcs == NULL)
4416                 adev->mode_info.funcs = &dm_display_funcs;
4417
4418         /*
4419          * Note: Do NOT change adev->audio_endpt_rreg and
4420          * adev->audio_endpt_wreg because they are initialised in
4421          * amdgpu_device_init()
4422          */
4423 #if defined(CONFIG_DEBUG_KERNEL_DC)
4424         device_create_file(
4425                 adev_to_drm(adev)->dev,
4426                 &dev_attr_s3_debug);
4427 #endif
4428
4429         return 0;
4430 }
4431
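/*
 * Note: the new_stream/old_stream arguments are currently unused; whether
 * a modeset (or reset) is required is keyed purely off the DRM CRTC state.
 */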
4432 static bool modeset_required(struct drm_crtc_state *crtc_state,
4433                              struct dc_stream_state *new_stream,
4434                              struct dc_stream_state *old_stream)
4435 {
4436         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4437 }
4438
4439 static bool modereset_required(struct drm_crtc_state *crtc_state)
4440 {
4441         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4442 }
4443
4444 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4445 {
4446         drm_encoder_cleanup(encoder);
4447         kfree(encoder);
4448 }
4449
4450 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4451         .destroy = amdgpu_dm_encoder_destroy,
4452 };
4454
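/*
 * get_min_max_dc_plane_scaling - query the scaling limits for a format
 *
 * Look up the maximum upscale and minimum downscale factors (in units of
 * 1/1000) that the DC plane caps allow for the pixel format of @fb. A cap
 * of 1 is normalized to 1000, i.e. no scaling, at the end of the function.
 */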
4455 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4456                                          struct drm_framebuffer *fb,
4457                                          int *min_downscale, int *max_upscale)
4458 {
4459         struct amdgpu_device *adev = drm_to_adev(dev);
4460         struct dc *dc = adev->dm.dc;
4461         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4462         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4463
4464         switch (fb->format->format) {
4465         case DRM_FORMAT_P010:
4466         case DRM_FORMAT_NV12:
4467         case DRM_FORMAT_NV21:
4468                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4469                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4470                 break;
4471
4472         case DRM_FORMAT_XRGB16161616F:
4473         case DRM_FORMAT_ARGB16161616F:
4474         case DRM_FORMAT_XBGR16161616F:
4475         case DRM_FORMAT_ABGR16161616F:
4476                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4477                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4478                 break;
4479
4480         default:
4481                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4482                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4483                 break;
4484         }
4485
4486         /*
4487          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
4488          * the plane is limited to a scaling factor of 1.0, which is 1000 units.
4489          */
4490         if (*max_upscale == 1)
4491                 *max_upscale = 1000;
4492
4493         if (*min_downscale == 1)
4494                 *min_downscale = 1000;
4495 }
4497
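/*
 * fill_dc_scaling_info - translate DRM plane coordinates into DC rects
 *
 * Convert the 16.16 fixed-point src rect and the integer crtc rect from
 * @state into DC's src/dst/clip rects, rejecting zero-sized rects and
 * scaling ratios outside the per-format plane caps.
 */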
4498 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4499                                 struct dc_scaling_info *scaling_info)
4500 {
4501         int scale_w, scale_h, min_downscale, max_upscale;
4502
4503         memset(scaling_info, 0, sizeof(*scaling_info));
4504
4505         /* Source coords are in 16.16 fixed point; ignore the fractional part for now. */
4506         scaling_info->src_rect.x = state->src_x >> 16;
4507         scaling_info->src_rect.y = state->src_y >> 16;
4508
4509         /*
4510          * For reasons we don't (yet) fully understand a non-zero
4511          * src_y coordinate into an NV12 buffer can cause a
4512          * system hang. To avoid hangs (and maybe be overly cautious)
4513          * let's reject both non-zero src_x and src_y.
4514          *
4515          * We currently know of only one use-case to reproduce a
4516          * scenario with non-zero src_x and src_y for NV12, which
4517          * is to gesture the YouTube Android app into full screen
4518          * on ChromeOS.
4519          */
4520         if (state->fb &&
4521             state->fb->format->format == DRM_FORMAT_NV12 &&
4522             (scaling_info->src_rect.x != 0 ||
4523              scaling_info->src_rect.y != 0))
4524                 return -EINVAL;
4525
4526         scaling_info->src_rect.width = state->src_w >> 16;
4527         if (scaling_info->src_rect.width == 0)
4528                 return -EINVAL;
4529
4530         scaling_info->src_rect.height = state->src_h >> 16;
4531         if (scaling_info->src_rect.height == 0)
4532                 return -EINVAL;
4533
4534         scaling_info->dst_rect.x = state->crtc_x;
4535         scaling_info->dst_rect.y = state->crtc_y;
4536
4537         if (state->crtc_w == 0)
4538                 return -EINVAL;
4539
4540         scaling_info->dst_rect.width = state->crtc_w;
4541
4542         if (state->crtc_h == 0)
4543                 return -EINVAL;
4544
4545         scaling_info->dst_rect.height = state->crtc_h;
4546
4547         /* DRM doesn't specify clipping on destination output. */
4548         scaling_info->clip_rect = scaling_info->dst_rect;
4549
4550         /* Validate scaling per-format with DC plane caps */
4551         if (state->plane && state->plane->dev && state->fb) {
4552                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4553                                              &min_downscale, &max_upscale);
4554         } else {
4555                 min_downscale = 250;
4556                 max_upscale = 16000;
4557         }
4558
4559         scale_w = scaling_info->dst_rect.width * 1000 /
4560                   scaling_info->src_rect.width;
4561
4562         if (scale_w < min_downscale || scale_w > max_upscale)
4563                 return -EINVAL;
4564
4565         scale_h = scaling_info->dst_rect.height * 1000 /
4566                   scaling_info->src_rect.height;
4567
4568         if (scale_h < min_downscale || scale_h > max_upscale)
4569                 return -EINVAL;
4570
4571         /*
4572          * The "scaling_quality" can be left at zero for now; quality = 0 makes
4573          * DC assume reasonable defaults based on the format.
4574          */
4575
4576         return 0;
4577 }
4578
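/*
 * Decode the legacy (pre-GFX9) AMDGPU_TILING_* flags from the BO metadata
 * into DC's gfx8 tiling description.
 */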
4579 static void
4580 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4581                                  uint64_t tiling_flags)
4582 {
4583         /* Fill GFX8 params */
4584         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4585                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4586
4587                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4588                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4589                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4590                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4591                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4592
4593                 /* XXX fix me for VI */
4594                 tiling_info->gfx8.num_banks = num_banks;
4595                 tiling_info->gfx8.array_mode =
4596                                 DC_ARRAY_2D_TILED_THIN1;
4597                 tiling_info->gfx8.tile_split = tile_split;
4598                 tiling_info->gfx8.bank_width = bankw;
4599                 tiling_info->gfx8.bank_height = bankh;
4600                 tiling_info->gfx8.tile_aspect = mtaspect;
4601                 tiling_info->gfx8.tile_mode =
4602                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4603         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4604                         == DC_ARRAY_1D_TILED_THIN1) {
4605                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4606         }
4607
4608         tiling_info->gfx8.pipe_config =
4609                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4610 }
4611
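/*
 * Fill the GFX9+ tiling parameters that depend only on the device's
 * gb_addr_config, independent of any framebuffer modifier.
 */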
4612 static void
4613 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4614                                   union dc_tiling_info *tiling_info)
4615 {
4616         tiling_info->gfx9.num_pipes =
4617                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4618         tiling_info->gfx9.num_banks =
4619                 adev->gfx.config.gb_addr_config_fields.num_banks;
4620         tiling_info->gfx9.pipe_interleave =
4621                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4622         tiling_info->gfx9.num_shader_engines =
4623                 adev->gfx.config.gb_addr_config_fields.num_se;
4624         tiling_info->gfx9.max_compressed_frags =
4625                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4626         tiling_info->gfx9.num_rb_per_se =
4627                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4628         tiling_info->gfx9.shaderEnable = 1;
4629         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4630                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4631 }
4632
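/*
 * validate_dcc - check a DCC configuration against DC's capabilities
 *
 * Return 0 if DCC is disabled, or if get_dcc_compression_cap() reports the
 * surface as compressible with the requested independent block settings;
 * -EINVAL otherwise.
 */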
4633 static int
4634 validate_dcc(struct amdgpu_device *adev,
4635              const enum surface_pixel_format format,
4636              const enum dc_rotation_angle rotation,
4637              const union dc_tiling_info *tiling_info,
4638              const struct dc_plane_dcc_param *dcc,
4639              const struct dc_plane_address *address,
4640              const struct plane_size *plane_size)
4641 {
4642         struct dc *dc = adev->dm.dc;
4643         struct dc_dcc_surface_param input;
4644         struct dc_surface_dcc_cap output;
4645
4646         memset(&input, 0, sizeof(input));
4647         memset(&output, 0, sizeof(output));
4648
4649         if (!dcc->enable)
4650                 return 0;
4651
4652         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4653             !dc->cap_funcs.get_dcc_compression_cap)
4654                 return -EINVAL;
4655
4656         input.format = format;
4657         input.surface_size.width = plane_size->surface_size.width;
4658         input.surface_size.height = plane_size->surface_size.height;
4659         input.swizzle_mode = tiling_info->gfx9.swizzle;
4660
4661         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4662                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4663         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4664                 input.scan = SCAN_DIRECTION_VERTICAL;
4665
4666         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4667                 return -EINVAL;
4668
4669         if (!output.capable)
4670                 return -EINVAL;
4671
4672         if (dcc->independent_64b_blks == 0 &&
4673             output.grph.rgb.independent_64b_blks != 0)
4674                 return -EINVAL;
4675
4676         return 0;
4677 }
4678
4679 static bool
4680 modifier_has_dcc(uint64_t modifier)
4681 {
4682         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4683 }
4684
4685 static unsigned
4686 modifier_gfx9_swizzle_mode(uint64_t modifier)
4687 {
4688         if (modifier == DRM_FORMAT_MOD_LINEAR)
4689                 return 0;
4690
4691         return AMD_FMT_MOD_GET(TILE, modifier);
4692 }
4693
4694 static const struct drm_format_info *
4695 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4696 {
4697         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4698 }
4699
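/*
 * Start from the device-derived GFX9 tiling info and override the
 * pipe/bank/packer layout with what the AMD framebuffer modifier encodes.
 */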
4700 static void
4701 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4702                                     union dc_tiling_info *tiling_info,
4703                                     uint64_t modifier)
4704 {
4705         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4706         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4707         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4708         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4709
4710         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4711
4712         if (!IS_AMD_FMT_MOD(modifier))
4713                 return;
4714
4715         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4716         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4717
4718         if (adev->family >= AMDGPU_FAMILY_NV) {
4719                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4720         } else {
4721                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4722
4723                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4724         }
4725 }
4726
4727 enum dm_micro_swizzle {
4728         MICRO_SWIZZLE_Z = 0,
4729         MICRO_SWIZZLE_S = 1,
4730         MICRO_SWIZZLE_D = 2,
4731         MICRO_SWIZZLE_R = 3
4732 };
4733
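/*
 * dm_plane_format_mod_supported - validate a format/modifier pair
 *
 * LINEAR and INVALID are always accepted. Any other modifier must be on
 * the plane's modifier list and satisfy the per-family micro-tile and DCC
 * restrictions checked below.
 */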
4734 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4735                                           uint32_t format,
4736                                           uint64_t modifier)
4737 {
4738         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4739         const struct drm_format_info *info = drm_format_info(format);
4740         int i;
4741
4742         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4743
4744         if (!info)
4745                 return false;
4746
4747         /*
4748          * We always have to allow these modifiers:
4749          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4750          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4751          */
4752         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4753             modifier == DRM_FORMAT_MOD_INVALID) {
4754                 return true;
4755         }
4756
4757         /* Check that the modifier is on the list of the plane's supported modifiers. */
4758         for (i = 0; i < plane->modifier_count; i++) {
4759                 if (modifier == plane->modifiers[i])
4760                         break;
4761         }
4762         if (i == plane->modifier_count)
4763                 return false;
4764
4765         /*
4766          * For D swizzle the canonical modifier depends on the bpp, so check
4767          * it here.
4768          */
4769         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4770             adev->family >= AMDGPU_FAMILY_NV) {
4771                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4772                         return false;
4773         }
4774
4775         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4776             info->cpp[0] < 8)
4777                 return false;
4778
4779         if (modifier_has_dcc(modifier)) {
4780                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4781                 if (info->cpp[0] != 4)
4782                         return false;
4783                 /* We support multi-planar formats, but not when combined with
4784                  * additional DCC metadata planes. */
4785                 if (info->num_planes > 1)
4786                         return false;
4787         }
4788
4789         return true;
4790 }
4791
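/*
 * Append @mod to a growable modifier array, doubling the capacity when it
 * is full. On allocation failure the array is freed and *mods is set to
 * NULL, turning subsequent calls into no-ops so the caller can detect the
 * failure with a single NULL check at the end.
 */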
4792 static void
4793 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4794 {
4795         if (!*mods)
4796                 return;
4797
4798         if (*cap - *size < 1) {
4799                 uint64_t new_cap = *cap * 2;
4800                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4801
4802                 if (!new_mods) {
4803                         kfree(*mods);
4804                         *mods = NULL;
4805                         return;
4806                 }
4807
4808                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4809                 kfree(*mods);
4810                 *mods = new_mods;
4811                 *cap = new_cap;
4812         }
4813
4814         (*mods)[*size] = mod;
4815         *size += 1;
4816 }
4817
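/*
 * Advertise the GFX9 swizzle modes as framebuffer modifiers. DCC variants
 * are only exposed on the Raven family; the _D swizzles are 64bpp-only and
 * get filtered by format in dm_plane_format_mod_supported.
 */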
4818 static void
4819 add_gfx9_modifiers(const struct amdgpu_device *adev,
4820                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4821 {
4822         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4823         int pipe_xor_bits = min(8, pipes +
4824                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4825         int bank_xor_bits = min(8 - pipe_xor_bits,
4826                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4827         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4828                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4829
4831         if (adev->family == AMDGPU_FAMILY_RV) {
4832                 /* DCC constant encoding is supported on Raven2 and later */
4833                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4834
4835                 /*
4836                  * No _D DCC swizzles yet because we only allow 32bpp, which
4837                  * doesn't support _D on DCN
4838                  */
4839
4840                 if (has_constant_encode) {
4841                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4842                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4843                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4844                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4845                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4846                                     AMD_FMT_MOD_SET(DCC, 1) |
4847                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4848                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4849                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4850                 }
4851
4852                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4853                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4854                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4855                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4856                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4857                             AMD_FMT_MOD_SET(DCC, 1) |
4858                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4859                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4860                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4861
4862                 if (has_constant_encode) {
4863                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4864                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4865                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4866                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4867                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4868                                     AMD_FMT_MOD_SET(DCC, 1) |
4869                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4870                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4871                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4873                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4874                                     AMD_FMT_MOD_SET(RB, rb) |
4875                                     AMD_FMT_MOD_SET(PIPE, pipes));
4876                 }
4877
4878                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4879                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4880                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4881                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4882                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4883                             AMD_FMT_MOD_SET(DCC, 1) |
4884                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4885                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4886                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4887                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4888                             AMD_FMT_MOD_SET(RB, rb) |
4889                             AMD_FMT_MOD_SET(PIPE, pipes));
4890         }
4891
4892         /*
4893          * Only supported for 64bpp on Raven, will be filtered on format in
4894          * dm_plane_format_mod_supported.
4895          */
4896         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4897                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4898                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4899                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4900                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4901
4902         if (adev->family == AMDGPU_FAMILY_RV) {
4903                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4904                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4905                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4906                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4907                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4908         }
4909
4910         /*
4911          * Only supported for 64bpp on Raven, will be filtered on format in
4912          * dm_plane_format_mod_supported.
4913          */
4914         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4915                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4916                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4917
4918         if (adev->family == AMDGPU_FAMILY_RV) {
4919                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4920                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4921                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4922         }
4923 }
4924
4925 static void
4926 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4927                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4928 {
4929         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4930
4931         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4932                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4933                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4934                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4935                     AMD_FMT_MOD_SET(DCC, 1) |
4936                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4937                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4938                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4939
4940         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4941                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4942                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4943                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4944                     AMD_FMT_MOD_SET(DCC, 1) |
4945                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4946                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4947                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4949
4950         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4951                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4952                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4953                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4954
4955         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4956                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4957                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4958                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4959
4961         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4962         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4963                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4964                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4965
4966         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4968                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4969 }
4970
4971 static void
4972 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4973                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4974 {
4975         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4976         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4977
4978         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4979                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4980                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4981                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4982                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4983                     AMD_FMT_MOD_SET(DCC, 1) |
4984                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4985                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4986                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4987                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4988
4989         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4990                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4991                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4992                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4993                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4994                     AMD_FMT_MOD_SET(DCC, 1) |
4995                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4996                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4997                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4998
4999         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5001                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5002                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5004                     AMD_FMT_MOD_SET(DCC, 1) |
5005                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5006                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5007                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5009                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5010
5011         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5013                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5014                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5016                     AMD_FMT_MOD_SET(DCC, 1) |
5017                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5018                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5019                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5020                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5021
5022         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5024                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5025                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5026                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5027
5028         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5029                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5030                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5031                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5032                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5033
5034         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5035         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5036                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5037                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5038
5039         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5041                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5042 }
5043
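/*
 * get_plane_modifiers - build the modifier list advertised for a plane
 *
 * Allocate a DRM_FORMAT_MOD_INVALID-terminated array with the modifiers
 * supported by this GPU family for @plane_type. Pre-GFX9 families get no
 * list at all (*mods stays NULL), which disables modifier support.
 */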
5044 static int
5045 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5046 {
5047         uint64_t size = 0, capacity = 128;
5048         *mods = NULL;
5049
5050         /* We have not hooked up any pre-GFX9 modifiers. */
5051         if (adev->family < AMDGPU_FAMILY_AI)
5052                 return 0;
5053
5054         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5055
5056         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5057                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5058                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5059                 return *mods ? 0 : -ENOMEM;
5060         }
5061
5062         switch (adev->family) {
5063         case AMDGPU_FAMILY_AI:
5064         case AMDGPU_FAMILY_RV:
5065                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5066                 break;
5067         case AMDGPU_FAMILY_NV:
5068         case AMDGPU_FAMILY_VGH:
5069         case AMDGPU_FAMILY_YC:
5070                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5071                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5072                 else
5073                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5074                 break;
5075         }
5076
5077         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5078
5079         /* INVALID marks the end of the list. */
5080         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5081
5082         if (!*mods)
5083                 return -ENOMEM;
5084
5085         return 0;
5086 }
5087
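/*
 * Derive the GFX9+ tiling info from the framebuffer modifier and, when the
 * modifier carries DCC, fill the DCC parameters and metadata address as
 * well, then validate the combination against DC's capabilities.
 */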
5088 static int
5089 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5090                                           const struct amdgpu_framebuffer *afb,
5091                                           const enum surface_pixel_format format,
5092                                           const enum dc_rotation_angle rotation,
5093                                           const struct plane_size *plane_size,
5094                                           union dc_tiling_info *tiling_info,
5095                                           struct dc_plane_dcc_param *dcc,
5096                                           struct dc_plane_address *address,
5097                                           const bool force_disable_dcc)
5098 {
5099         const uint64_t modifier = afb->base.modifier;
5100         int ret = 0;
5101
5102         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5103         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5104
5105         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5106                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5107                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5108                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5109
5110                 dcc->enable = 1;
5111                 dcc->meta_pitch = afb->base.pitches[1];
5112                 dcc->independent_64b_blks = independent_64b_blks;
5113                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5114                         if (independent_64b_blks && independent_128b_blks)
5115                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5116                         else if (independent_128b_blks)
5117                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5118                         else if (independent_64b_blks)
5119                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5120                         else
5121                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5122                 } else {
5123                         if (independent_64b_blks)
5124                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5125                         else
5126                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5127                 }
5128
5129                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5130                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5131         }
5132
5133         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5134         if (ret)
5135                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5136
5137         return ret;
5138 }
5139
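/*
 * fill_plane_buffer_attributes - describe a framebuffer's memory layout to DC
 *
 * Fill the surface size, pitch and GPU addresses for single-plane RGB and
 * semi-planar YUV surfaces, plus the tiling/DCC state: from the modifier
 * on GFX9+ or from the legacy tiling flags on older families.
 */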
5140 static int
5141 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5142                              const struct amdgpu_framebuffer *afb,
5143                              const enum surface_pixel_format format,
5144                              const enum dc_rotation_angle rotation,
5145                              const uint64_t tiling_flags,
5146                              union dc_tiling_info *tiling_info,
5147                              struct plane_size *plane_size,
5148                              struct dc_plane_dcc_param *dcc,
5149                              struct dc_plane_address *address,
5150                              bool tmz_surface,
5151                              bool force_disable_dcc)
5152 {
5153         const struct drm_framebuffer *fb = &afb->base;
5154         int ret;
5155
5156         memset(tiling_info, 0, sizeof(*tiling_info));
5157         memset(plane_size, 0, sizeof(*plane_size));
5158         memset(dcc, 0, sizeof(*dcc));
5159         memset(address, 0, sizeof(*address));
5160
5161         address->tmz_surface = tmz_surface;
5162
5163         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5164                 uint64_t addr = afb->address + fb->offsets[0];
5165
5166                 plane_size->surface_size.x = 0;
5167                 plane_size->surface_size.y = 0;
5168                 plane_size->surface_size.width = fb->width;
5169                 plane_size->surface_size.height = fb->height;
5170                 plane_size->surface_pitch =
5171                         fb->pitches[0] / fb->format->cpp[0];
5172
5173                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5174                 address->grph.addr.low_part = lower_32_bits(addr);
5175                 address->grph.addr.high_part = upper_32_bits(addr);
5176         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5177                 uint64_t luma_addr = afb->address + fb->offsets[0];
5178                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5179
5180                 plane_size->surface_size.x = 0;
5181                 plane_size->surface_size.y = 0;
5182                 plane_size->surface_size.width = fb->width;
5183                 plane_size->surface_size.height = fb->height;
5184                 plane_size->surface_pitch =
5185                         fb->pitches[0] / fb->format->cpp[0];
5186
5187                 plane_size->chroma_size.x = 0;
5188                 plane_size->chroma_size.y = 0;
5189                 /* TODO: set these based on surface format */
5190                 plane_size->chroma_size.width = fb->width / 2;
5191                 plane_size->chroma_size.height = fb->height / 2;
5192
5193                 plane_size->chroma_pitch =
5194                         fb->pitches[1] / fb->format->cpp[1];
5195
5196                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5197                 address->video_progressive.luma_addr.low_part =
5198                         lower_32_bits(luma_addr);
5199                 address->video_progressive.luma_addr.high_part =
5200                         upper_32_bits(luma_addr);
5201                 address->video_progressive.chroma_addr.low_part =
5202                         lower_32_bits(chroma_addr);
5203                 address->video_progressive.chroma_addr.high_part =
5204                         upper_32_bits(chroma_addr);
5205         }
5206
5207         if (adev->family >= AMDGPU_FAMILY_AI) {
5208                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5209                                                                 rotation, plane_size,
5210                                                                 tiling_info, dcc,
5211                                                                 address,
5212                                                                 force_disable_dcc);
5213                 if (ret)
5214                         return ret;
5215         } else {
5216                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5217         }
5218
5219         return 0;
5220 }
5221
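/*
 * Derive per-pixel and global alpha blending state from the DRM plane
 * state. Only overlay planes take part in blending here; pre-multiplied
 * per-pixel alpha is limited to the listed ARGB-style formats.
 */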
5222 static void
5223 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5224                                bool *per_pixel_alpha, bool *global_alpha,
5225                                int *global_alpha_value)
5226 {
5227         *per_pixel_alpha = false;
5228         *global_alpha = false;
5229         *global_alpha_value = 0xff;
5230
5231         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5232                 return;
5233
5234         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5235                 static const uint32_t alpha_formats[] = {
5236                         DRM_FORMAT_ARGB8888,
5237                         DRM_FORMAT_RGBA8888,
5238                         DRM_FORMAT_ABGR8888,
5239                 };
5240                 uint32_t format = plane_state->fb->format->format;
5241                 unsigned int i;
5242
5243                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5244                         if (format == alpha_formats[i]) {
5245                                 *per_pixel_alpha = true;
5246                                 break;
5247                         }
5248                 }
5249         }
5250
5251         if (plane_state->alpha < 0xffff) {
5252                 *global_alpha = true;
5253                 *global_alpha_value = plane_state->alpha >> 8;
5254         }
5255 }
5256
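/*
 * Map the DRM color encoding and range properties of a YUV surface onto a
 * DC color space. RGB formats are unaffected and always use sRGB.
 */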
5257 static int
5258 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5259                             const enum surface_pixel_format format,
5260                             enum dc_color_space *color_space)
5261 {
5262         bool full_range;
5263
5264         *color_space = COLOR_SPACE_SRGB;
5265
5266         /* DRM color properties only affect non-RGB formats. */
5267         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5268                 return 0;
5269
5270         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5271
5272         switch (plane_state->color_encoding) {
5273         case DRM_COLOR_YCBCR_BT601:
5274                 if (full_range)
5275                         *color_space = COLOR_SPACE_YCBCR601;
5276                 else
5277                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5278                 break;
5279
5280         case DRM_COLOR_YCBCR_BT709:
5281                 if (full_range)
5282                         *color_space = COLOR_SPACE_YCBCR709;
5283                 else
5284                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5285                 break;
5286
5287         case DRM_COLOR_YCBCR_BT2020:
5288                 if (full_range)
5289                         *color_space = COLOR_SPACE_2020_YCBCR;
5290                 else
5291                         return -EINVAL;
5292                 break;
5293
5294         default:
5295                 return -EINVAL;
5296         }
5297
5298         return 0;
5299 }
5300
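/*
 * fill_dc_plane_info_and_addr - build a dc_plane_info from DRM plane state
 *
 * Translate the framebuffer format and plane rotation, then fill in the
 * color space, buffer attributes and blending state. Returns -EINVAL for
 * formats DC cannot scan out.
 */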
5301 static int
5302 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5303                             const struct drm_plane_state *plane_state,
5304                             const uint64_t tiling_flags,
5305                             struct dc_plane_info *plane_info,
5306                             struct dc_plane_address *address,
5307                             bool tmz_surface,
5308                             bool force_disable_dcc)
5309 {
5310         const struct drm_framebuffer *fb = plane_state->fb;
5311         const struct amdgpu_framebuffer *afb =
5312                 to_amdgpu_framebuffer(plane_state->fb);
5313         int ret;
5314
5315         memset(plane_info, 0, sizeof(*plane_info));
5316
5317         switch (fb->format->format) {
5318         case DRM_FORMAT_C8:
5319                 plane_info->format =
5320                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5321                 break;
5322         case DRM_FORMAT_RGB565:
5323                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5324                 break;
5325         case DRM_FORMAT_XRGB8888:
5326         case DRM_FORMAT_ARGB8888:
5327                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5328                 break;
5329         case DRM_FORMAT_XRGB2101010:
5330         case DRM_FORMAT_ARGB2101010:
5331                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5332                 break;
5333         case DRM_FORMAT_XBGR2101010:
5334         case DRM_FORMAT_ABGR2101010:
5335                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5336                 break;
5337         case DRM_FORMAT_XBGR8888:
5338         case DRM_FORMAT_ABGR8888:
5339                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5340                 break;
5341         case DRM_FORMAT_NV21:
5342                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5343                 break;
5344         case DRM_FORMAT_NV12:
5345                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5346                 break;
5347         case DRM_FORMAT_P010:
5348                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5349                 break;
5350         case DRM_FORMAT_XRGB16161616F:
5351         case DRM_FORMAT_ARGB16161616F:
5352                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5353                 break;
5354         case DRM_FORMAT_XBGR16161616F:
5355         case DRM_FORMAT_ABGR16161616F:
5356                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5357                 break;
5358         case DRM_FORMAT_XRGB16161616:
5359         case DRM_FORMAT_ARGB16161616:
5360                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5361                 break;
5362         case DRM_FORMAT_XBGR16161616:
5363         case DRM_FORMAT_ABGR16161616:
5364                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5365                 break;
5366         default:
5367                 DRM_ERROR(
5368                         "Unsupported screen format %p4cc\n",
5369                         &fb->format->format);
5370                 return -EINVAL;
5371         }
5372
5373         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5374         case DRM_MODE_ROTATE_0:
5375                 plane_info->rotation = ROTATION_ANGLE_0;
5376                 break;
5377         case DRM_MODE_ROTATE_90:
5378                 plane_info->rotation = ROTATION_ANGLE_90;
5379                 break;
5380         case DRM_MODE_ROTATE_180:
5381                 plane_info->rotation = ROTATION_ANGLE_180;
5382                 break;
5383         case DRM_MODE_ROTATE_270:
5384                 plane_info->rotation = ROTATION_ANGLE_270;
5385                 break;
5386         default:
5387                 plane_info->rotation = ROTATION_ANGLE_0;
5388                 break;
5389         }
5390
5391         plane_info->visible = true;
5392         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5393
5394         plane_info->layer_index = 0;
5395
5396         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5397                                           &plane_info->color_space);
5398         if (ret)
5399                 return ret;
5400
5401         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5402                                            plane_info->rotation, tiling_flags,
5403                                            &plane_info->tiling_info,
5404                                            &plane_info->plane_size,
5405                                            &plane_info->dcc, address, tmz_surface,
5406                                            force_disable_dcc);
5407         if (ret)
5408                 return ret;
5409
5410         fill_blending_from_plane_state(
5411                 plane_state, &plane_info->per_pixel_alpha,
5412                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5413
5414         return 0;
5415 }
5416
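/*
 * Translates a drm_plane_state into the dc_plane_state fields consumed by
 * DC: scaling rects, surface format/tiling/DCC, address, blending and
 * color management. Pure state translation; nothing is committed to
 * hardware here.
 */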
5417 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5418                                     struct dc_plane_state *dc_plane_state,
5419                                     struct drm_plane_state *plane_state,
5420                                     struct drm_crtc_state *crtc_state)
5421 {
5422         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5423         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5424         struct dc_scaling_info scaling_info;
5425         struct dc_plane_info plane_info;
5426         int ret;
5427         bool force_disable_dcc = false;
5428
5429         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5430         if (ret)
5431                 return ret;
5432
5433         dc_plane_state->src_rect = scaling_info.src_rect;
5434         dc_plane_state->dst_rect = scaling_info.dst_rect;
5435         dc_plane_state->clip_rect = scaling_info.clip_rect;
5436         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5437
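        /*
         * DCC is force-disabled while suspending on Raven; the check below
         * suggests DCC surfaces are not safe to keep enabled across the
         * suspend path on that ASIC (workaround, inferred from the code).
         */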
5438         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5439         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5440                                           afb->tiling_flags,
5441                                           &plane_info,
5442                                           &dc_plane_state->address,
5443                                           afb->tmz_surface,
5444                                           force_disable_dcc);
5445         if (ret)
5446                 return ret;
5447
5448         dc_plane_state->format = plane_info.format;
5449         dc_plane_state->color_space = plane_info.color_space;
5451         dc_plane_state->plane_size = plane_info.plane_size;
5452         dc_plane_state->rotation = plane_info.rotation;
5453         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5454         dc_plane_state->stereo_format = plane_info.stereo_format;
5455         dc_plane_state->tiling_info = plane_info.tiling_info;
5456         dc_plane_state->visible = plane_info.visible;
5457         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5458         dc_plane_state->global_alpha = plane_info.global_alpha;
5459         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5460         dc_plane_state->dcc = plane_info.dcc;
5461         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 currently */
5462         dc_plane_state->flip_int_enabled = true;
5463
5464         /*
5465          * Always set input transfer function, since plane state is refreshed
5466          * every time.
5467          */
5468         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5469         if (ret)
5470                 return ret;
5471
5472         return 0;
5473 }
5474
5475 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5476                                            const struct dm_connector_state *dm_state,
5477                                            struct dc_stream_state *stream)
5478 {
5479         enum amdgpu_rmx_type rmx_type;
5480
5481         struct rect src = { 0 }; /* viewport in composition space */
5482         struct rect dst = { 0 }; /* stream addressable area */
5483
5484         /* no mode. nothing to be done */
5485         if (!mode)
5486                 return;
5487
5488         /* Full screen scaling by default */
5489         src.width = mode->hdisplay;
5490         src.height = mode->vdisplay;
5491         dst.width = stream->timing.h_addressable;
5492         dst.height = stream->timing.v_addressable;
5493
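        /*
         * RMX_ASPECT/RMX_OFF: shrink the destination rect along one axis so
         * the source aspect ratio is preserved, then center it. Worked
         * example: a 1280x1024 source on a 1920x1080 panel gives
         * 1280*1080 < 1024*1920, so dst.width = 1280 * 1080 / 1024 = 1350
         * and the image is pillarboxed with dst.x = (1920 - 1350) / 2 = 285.
         */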
5494         if (dm_state) {
5495                 rmx_type = dm_state->scaling;
5496                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5497                         if (src.width * dst.height <
5498                                         src.height * dst.width) {
5499                                 /* height needs less upscaling/more downscaling */
5500                                 dst.width = src.width *
5501                                                 dst.height / src.height;
5502                         } else {
5503                                 /* width needs less upscaling/more downscaling */
5504                                 dst.height = src.height *
5505                                                 dst.width / src.width;
5506                         }
5507                 } else if (rmx_type == RMX_CENTER) {
5508                         dst = src;
5509                 }
5510
5511                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5512                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5513
5514                 if (dm_state->underscan_enable) {
5515                         dst.x += dm_state->underscan_hborder / 2;
5516                         dst.y += dm_state->underscan_vborder / 2;
5517                         dst.width -= dm_state->underscan_hborder;
5518                         dst.height -= dm_state->underscan_vborder;
5519                 }
5520         }
5521
5522         stream->src = src;
5523         stream->dst = dst;
5524
5525         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5526                       dst.x, dst.y, dst.width, dst.height);
5527
5528 }
5529
5530 static enum dc_color_depth
5531 convert_color_depth_from_display_info(const struct drm_connector *connector,
5532                                       bool is_y420, int requested_bpc)
5533 {
5534         uint8_t bpc;
5535
5536         if (is_y420) {
5537                 bpc = 8;
5538
5539                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5540                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5541                         bpc = 16;
5542                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5543                         bpc = 12;
5544                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5545                         bpc = 10;
5546         } else {
5547                 bpc = (uint8_t)connector->display_info.bpc;
5548                 /* Assume 8 bpc by default if no bpc is specified. */
5549                 bpc = bpc ? bpc : 8;
5550         }
5551
5552         if (requested_bpc > 0) {
5553                 /*
5554                  * Cap display bpc based on the user requested value.
5555                  *
5556                  * The value for state->max_bpc may not be correctly updated
5557                  * depending on when the connector gets added to the state
5558                  * or if this was called outside of atomic check, so it
5559                  * can't be used directly.
5560                  */
5561                 bpc = min_t(u8, bpc, requested_bpc);
5562
5563                 /* Round down to the nearest even number. */
5564                 bpc = bpc - (bpc & 1);
5565         }
5566
5567         switch (bpc) {
5568         case 0:
5569                 /*
5570                  * Temporary Work around, DRM doesn't parse color depth for
5571                  * EDID revision before 1.4
5572                  * TODO: Fix edid parsing
5573                  */
5574                 return COLOR_DEPTH_888;
5575         case 6:
5576                 return COLOR_DEPTH_666;
5577         case 8:
5578                 return COLOR_DEPTH_888;
5579         case 10:
5580                 return COLOR_DEPTH_101010;
5581         case 12:
5582                 return COLOR_DEPTH_121212;
5583         case 14:
5584                 return COLOR_DEPTH_141414;
5585         case 16:
5586                 return COLOR_DEPTH_161616;
5587         default:
5588                 return COLOR_DEPTH_UNDEFINED;
5589         }
5590 }
5591
5592 static enum dc_aspect_ratio
5593 get_aspect_ratio(const struct drm_display_mode *mode_in)
5594 {
5595         /* 1-1 mapping, since both enums follow the HDMI spec. */
5596         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5597 }
5598
5599 static enum dc_color_space
5600 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5601 {
5602         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5603
5604         switch (dc_crtc_timing->pixel_encoding) {
5605         case PIXEL_ENCODING_YCBCR422:
5606         case PIXEL_ENCODING_YCBCR444:
5607         case PIXEL_ENCODING_YCBCR420:
5608         {
5609                 /*
5610                  * 27030 kHz is the separation point between HDTV and SDTV
5611                  * according to the HDMI spec: use YCbCr709 above it and
5612                  * YCbCr601 below it.
5613                  */
5614                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5615                         if (dc_crtc_timing->flags.Y_ONLY)
5616                                 color_space =
5617                                         COLOR_SPACE_YCBCR709_LIMITED;
5618                         else
5619                                 color_space = COLOR_SPACE_YCBCR709;
5620                 } else {
5621                         if (dc_crtc_timing->flags.Y_ONLY)
5622                                 color_space =
5623                                         COLOR_SPACE_YCBCR601_LIMITED;
5624                         else
5625                                 color_space = COLOR_SPACE_YCBCR601;
5626                 }
5627
5628         }
5629         break;
5630         case PIXEL_ENCODING_RGB:
5631                 color_space = COLOR_SPACE_SRGB;
5632                 break;
5633
5634         default:
5635                 WARN_ON(1);
5636                 break;
5637         }
5638
5639         return color_space;
5640 }
5641
5642 static bool adjust_colour_depth_from_display_info(
5643         struct dc_crtc_timing *timing_out,
5644         const struct drm_display_info *info)
5645 {
5646         enum dc_color_depth depth = timing_out->display_color_depth;
5647         int normalized_clk;
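        /*
         * Walk down from the current depth until the depth-scaled pixel
         * clock fits the sink's max TMDS clock (both in kHz). Example:
         * 4k60 RGB is 594000 kHz; at 12 bpc it scales to 594000 * 36 / 24
         * = 891000 kHz and 10 bpc gives 742500 kHz, so a 600000 kHz sink
         * settles on 8 bpc (COLOR_DEPTH_888).
         */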
5648         do {
5649                 normalized_clk = timing_out->pix_clk_100hz / 10;
5650                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5651                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5652                         normalized_clk /= 2;
5653                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
5654                 switch (depth) {
5655                 case COLOR_DEPTH_888:
5656                         break;
5657                 case COLOR_DEPTH_101010:
5658                         normalized_clk = (normalized_clk * 30) / 24;
5659                         break;
5660                 case COLOR_DEPTH_121212:
5661                         normalized_clk = (normalized_clk * 36) / 24;
5662                         break;
5663                 case COLOR_DEPTH_161616:
5664                         normalized_clk = (normalized_clk * 48) / 24;
5665                         break;
5666                 default:
5667                         /* The above depths are the only ones valid for HDMI. */
5668                         return false;
5669                 }
5670                 if (normalized_clk <= info->max_tmds_clock) {
5671                         timing_out->display_color_depth = depth;
5672                         return true;
5673                 }
5674         } while (--depth > COLOR_DEPTH_666);
5675         return false;
5676 }
5677
5678 static void fill_stream_properties_from_drm_display_mode(
5679         struct dc_stream_state *stream,
5680         const struct drm_display_mode *mode_in,
5681         const struct drm_connector *connector,
5682         const struct drm_connector_state *connector_state,
5683         const struct dc_stream_state *old_stream,
5684         int requested_bpc)
5685 {
5686         struct dc_crtc_timing *timing_out = &stream->timing;
5687         const struct drm_display_info *info = &connector->display_info;
5688         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5689         struct hdmi_vendor_infoframe hv_frame;
5690         struct hdmi_avi_infoframe avi_frame;
5691
5692         memset(&hv_frame, 0, sizeof(hv_frame));
5693         memset(&avi_frame, 0, sizeof(avi_frame));
5694
5695         timing_out->h_border_left = 0;
5696         timing_out->h_border_right = 0;
5697         timing_out->v_border_top = 0;
5698         timing_out->v_border_bottom = 0;
5699         /* TODO: un-hardcode */
5700         if (drm_mode_is_420_only(info, mode_in)
5701                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5702                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5703         else if (drm_mode_is_420_also(info, mode_in)
5704                         && aconnector->force_yuv420_output)
5705                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5706         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5707                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5708                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5709         else
5710                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5711
5712         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5713         timing_out->display_color_depth = convert_color_depth_from_display_info(
5714                 connector,
5715                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5716                 requested_bpc);
5717         timing_out->scan_type = SCANNING_TYPE_NODATA;
5718         timing_out->hdmi_vic = 0;
5719
5720         if (old_stream) {
5721                 timing_out->vic = old_stream->timing.vic;
5722                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5723                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5724         } else {
5725                 timing_out->vic = drm_match_cea_mode(mode_in);
5726                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5727                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5728                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5729                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5730         }
5731
5732         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5733                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5734                 timing_out->vic = avi_frame.video_code;
5735                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5736                 timing_out->hdmi_vic = hv_frame.vic;
5737         }
5738
5739         if (is_freesync_video_mode(mode_in, aconnector)) {
5740                 timing_out->h_addressable = mode_in->hdisplay;
5741                 timing_out->h_total = mode_in->htotal;
5742                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5743                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5744                 timing_out->v_total = mode_in->vtotal;
5745                 timing_out->v_addressable = mode_in->vdisplay;
5746                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5747                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5748                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5749         } else {
5750                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5751                 timing_out->h_total = mode_in->crtc_htotal;
5752                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5753                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5754                 timing_out->v_total = mode_in->crtc_vtotal;
5755                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5756                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5757                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5758                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5759         }
5760
5761         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5762
5763         stream->output_color_space = get_output_color_space(timing_out);
5764
5765         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5766         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5767         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5768                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5769                     drm_mode_is_420_also(info, mode_in) &&
5770                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5771                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5772                         adjust_colour_depth_from_display_info(timing_out, info);
5773                 }
5774         }
5775 }
5776
5777 static void fill_audio_info(struct audio_info *audio_info,
5778                             const struct drm_connector *drm_connector,
5779                             const struct dc_sink *dc_sink)
5780 {
5781         int i = 0;
5782         int cea_revision = 0;
5783         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5784
5785         audio_info->manufacture_id = edid_caps->manufacturer_id;
5786         audio_info->product_id = edid_caps->product_id;
5787
5788         cea_revision = drm_connector->display_info.cea_rev;
5789
5790         strscpy(audio_info->display_name,
5791                 edid_caps->display_name,
5792                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5793
5794         if (cea_revision >= 3) {
5795                 audio_info->mode_count = edid_caps->audio_mode_count;
5796
5797                 for (i = 0; i < audio_info->mode_count; ++i) {
5798                         audio_info->modes[i].format_code =
5799                                         (enum audio_format_code)
5800                                         (edid_caps->audio_modes[i].format_code);
5801                         audio_info->modes[i].channel_count =
5802                                         edid_caps->audio_modes[i].channel_count;
5803                         audio_info->modes[i].sample_rates.all =
5804                                         edid_caps->audio_modes[i].sample_rate;
5805                         audio_info->modes[i].sample_size =
5806                                         edid_caps->audio_modes[i].sample_size;
5807                 }
5808         }
5809
5810         audio_info->flags.all = edid_caps->speaker_flags;
5811
5812         /* TODO: We only check the progressive mode; check the interlaced mode too */
5813         if (drm_connector->latency_present[0]) {
5814                 audio_info->video_latency = drm_connector->video_latency[0];
5815                 audio_info->audio_latency = drm_connector->audio_latency[0];
5816         }
5817
5818         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5819
5820 }
5821
5822 static void
5823 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5824                                       struct drm_display_mode *dst_mode)
5825 {
5826         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5827         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5828         dst_mode->crtc_clock = src_mode->crtc_clock;
5829         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5830         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5831         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5832         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5833         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5834         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5835         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5836         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5837         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5838         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5839         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5840 }
5841
5842 static void
5843 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5844                                         const struct drm_display_mode *native_mode,
5845                                         bool scale_enabled)
5846 {
5847         if (scale_enabled) {
5848                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5849         } else if (native_mode->clock == drm_mode->clock &&
5850                         native_mode->htotal == drm_mode->htotal &&
5851                         native_mode->vtotal == drm_mode->vtotal) {
5852                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5853         } else {
5854                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5855         }
5856 }
5857
5858 static struct dc_sink *
5859 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5860 {
5861         struct dc_sink_init_data sink_init_data = { 0 };
5862         struct dc_sink *sink = NULL;
5863         sink_init_data.link = aconnector->dc_link;
5864         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5865
5866         sink = dc_sink_create(&sink_init_data);
5867         if (!sink) {
5868                 DRM_ERROR("Failed to create sink!\n");
5869                 return NULL;
5870         }
5871         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5872
5873         return sink;
5874 }
5875
5876 static void set_multisync_trigger_params(
5877                 struct dc_stream_state *stream)
5878 {
5879         struct dc_stream_state *master = NULL;
5880
5881         if (stream->triggered_crtc_reset.enabled) {
5882                 master = stream->triggered_crtc_reset.event_source;
5883                 stream->triggered_crtc_reset.event =
5884                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5885                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5886                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5887         }
5888 }
5889
5890 static void set_master_stream(struct dc_stream_state *stream_set[],
5891                               int stream_count)
5892 {
5893         int j, highest_rfr = 0, master_stream = 0;
5894
5895         for (j = 0;  j < stream_count; j++) {
5896                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5897                         int refresh_rate = 0;
5898
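                        /*
                         * Refresh rate in Hz: pix_clk_100hz * 100 gives Hz,
                         * divided by pixels per frame. E.g. 1080p60:
                         * 1485000 * 100 / (2200 * 1125) = 60.
                         */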
5899                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5900                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5901                         if (refresh_rate > highest_rfr) {
5902                                 highest_rfr = refresh_rate;
5903                                 master_stream = j;
5904                         }
5905                 }
5906         }
5907         for (j = 0;  j < stream_count; j++) {
5908                 if (stream_set[j])
5909                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5910         }
5911 }
5912
5913 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5914 {
5915         int i = 0;
5916         struct dc_stream_state *stream;
5917
5918         if (context->stream_count < 2)
5919                 return;
5920         for (i = 0; i < context->stream_count ; i++) {
5921                 if (!context->streams[i])
5922                         continue;
5923                 /*
5924                  * TODO: add a function to read AMD VSDB bits and set
5925                  * crtc_sync_master.multi_sync_enabled flag
5926                  * For now it's set to false
5927                  */
5928         }
5929
5930         set_master_stream(context->streams, context->stream_count);
5931
5932         for (i = 0; i < context->stream_count ; i++) {
5933                 stream = context->streams[i];
5934
5935                 if (!stream)
5936                         continue;
5937
5938                 set_multisync_trigger_params(stream);
5939         }
5940 }
5941
5942 #if defined(CONFIG_DRM_AMD_DC_DCN)
5943 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5944                                                         struct dc_sink *sink, struct dc_stream_state *stream,
5945                                                         struct dsc_dec_dpcd_caps *dsc_caps)
5946 {
5947         stream->timing.flags.DSC = 0;
5948
5949         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5950                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5951                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5952                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5953                                       dsc_caps);
5954         }
5955 }
5956
5957 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5958                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
5959                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
5960 {
5961         struct drm_connector *drm_connector = &aconnector->base;
5962         uint32_t link_bandwidth_kbps;
5963         uint32_t max_dsc_target_bpp_limit_override = 0;
5964
5965         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5966                                                         dc_link_get_link_cap(aconnector->dc_link));
5967
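        /*
         * Pick up a panel-specific DSC bpp cap from the EDID patch table,
         * if any; 0 is passed through as "no override" (assumption based
         * on the default initialization above).
         */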
5968         if (stream->link && stream->link->local_sink)
5969                 max_dsc_target_bpp_limit_override =
5970                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5971
5972         /* Set DSC policy according to dsc_clock_en */
5973         dc_dsc_policy_set_enable_dsc_when_not_needed(
5974                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5975
5976         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5977
5978                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5979                                                 dsc_caps,
5980                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5981                                                 max_dsc_target_bpp_limit_override,
5982                                                 link_bandwidth_kbps,
5983                                                 &stream->timing,
5984                                                 &stream->timing.dsc_cfg)) {
5985                         stream->timing.flags.DSC = 1;
5986                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5987                 }
5988         }
5989
5990         /* Overwrite the stream flag if DSC is enabled through debugfs */
5991         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5992                 stream->timing.flags.DSC = 1;
5993
5994         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5995                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5996
5997         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5998                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5999
6000         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6001                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6002 }
6003 #endif
6004
6005 /**
6006  * DOC: FreeSync Video
6007  *
6008  * When a userspace application wants to play a video, the content follows a
6009  * standard format definition that usually specifies the FPS for that format.
6010  * The below list illustrates some video format and the expected FPS,
6011  * respectively:
6012  *
6013  * - TV/NTSC (23.976 FPS)
6014  * - Cinema (24 FPS)
6015  * - TV/PAL (25 FPS)
6016  * - TV/NTSC (29.97 FPS)
6017  * - TV/NTSC (30 FPS)
6018  * - Cinema HFR (48 FPS)
6019  * - TV/PAL (50 FPS)
6020  * - Commonly used (60 FPS)
6021  * - Multiples of 24 (48,72,96,120 FPS)
6022  *
6023  * The list of standard video formats is not huge and can be added to the
6024  * connector modeset list beforehand. With that, userspace can leverage
6025  * FreeSync to extend the front porch in order to attain the target refresh
6026  * rate. Such a switch happens seamlessly, without screen blanking or
6027  * reprogramming of the output in any other way. If userspace requests a
6028  * modesetting change compatible with FreeSync modes that only differ in
6029  * refresh rate, DC will skip the full update and avoid any blink during
6030  * the transition. For example, a video player can change the mode from
6031  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen,
6032  * without causing any display blink. The same concept applies to any
6033  * other compatible mode setting change.
6034  */
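/*
 * Illustration of the front-porch stretch (not driver code): with the pixel
 * clock held constant, dropping a 1080p base mode with vtotal 1125 from
 * 60 Hz to 48 Hz grows the vertical total to about 1125 * 60 / 48 ~= 1406
 * lines, with all of the extra lines going into the vertical front porch.
 */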
6035 static struct drm_display_mode *
6036 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6037                           bool use_probed_modes)
6038 {
6039         struct drm_display_mode *m, *m_pref = NULL;
6040         u16 current_refresh, highest_refresh;
6041         struct list_head *list_head = use_probed_modes ?
6042                                                     &aconnector->base.probed_modes :
6043                                                     &aconnector->base.modes;
6044
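        /* The base mode is computed once and then cached in freesync_vid_base. */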
6045         if (aconnector->freesync_vid_base.clock != 0)
6046                 return &aconnector->freesync_vid_base;
6047
6048         /* Find the preferred mode */
6049         list_for_each_entry(m, list_head, head) {
6050                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6051                         m_pref = m;
6052                         break;
6053                 }
6054         }
6055
6056         if (!m_pref) {
6057                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6058                 m_pref = list_first_entry_or_null(
6059                         &aconnector->base.modes, struct drm_display_mode, head);
6060                 if (!m_pref) {
6061                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6062                         return NULL;
6063                 }
6064         }
6065
6066         highest_refresh = drm_mode_vrefresh(m_pref);
6067
6068         /*
6069          * Find the mode with highest refresh rate with same resolution.
6070          * For some monitors, preferred mode is not the mode with highest
6071          * supported refresh rate.
6072          */
6073         list_for_each_entry(m, list_head, head) {
6074                 current_refresh  = drm_mode_vrefresh(m);
6075
6076                 if (m->hdisplay == m_pref->hdisplay &&
6077                     m->vdisplay == m_pref->vdisplay &&
6078                     highest_refresh < current_refresh) {
6079                         highest_refresh = current_refresh;
6080                         m_pref = m;
6081                 }
6082         }
6083
6084         aconnector->freesync_vid_base = *m_pref;
6085         return m_pref;
6086 }
6087
6088 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6089                                    struct amdgpu_dm_connector *aconnector)
6090 {
6091         struct drm_display_mode *high_mode;
6092         int timing_diff;
6093
6094         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6095         if (!high_mode || !mode)
6096                 return false;
6097
6098         timing_diff = high_mode->vtotal - mode->vtotal;
6099
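        /*
         * A mode qualifies as a FreeSync video variant of the base mode iff
         * it differs only in the vertical front porch: same pixel clock,
         * same active and horizontal timing, with vtotal and both vsync
         * edges shifted by the same timing_diff.
         */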
6100         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6101             high_mode->hdisplay != mode->hdisplay ||
6102             high_mode->vdisplay != mode->vdisplay ||
6103             high_mode->hsync_start != mode->hsync_start ||
6104             high_mode->hsync_end != mode->hsync_end ||
6105             high_mode->htotal != mode->htotal ||
6106             high_mode->hskew != mode->hskew ||
6107             high_mode->vscan != mode->vscan ||
6108             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6109             high_mode->vsync_end - mode->vsync_end != timing_diff)
6110                 return false;
6111         else
6112                 return true;
6113 }
6114
6115 static struct dc_stream_state *
6116 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6117                        const struct drm_display_mode *drm_mode,
6118                        const struct dm_connector_state *dm_state,
6119                        const struct dc_stream_state *old_stream,
6120                        int requested_bpc)
6121 {
6122         struct drm_display_mode *preferred_mode = NULL;
6123         struct drm_connector *drm_connector;
6124         const struct drm_connector_state *con_state =
6125                 dm_state ? &dm_state->base : NULL;
6126         struct dc_stream_state *stream = NULL;
6127         struct drm_display_mode mode = *drm_mode;
6128         struct drm_display_mode saved_mode;
6129         struct drm_display_mode *freesync_mode = NULL;
6130         bool native_mode_found = false;
6131         bool recalculate_timing = false;
6132         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6133         int mode_refresh;
6134         int preferred_refresh = 0;
6135 #if defined(CONFIG_DRM_AMD_DC_DCN)
6136         struct dsc_dec_dpcd_caps dsc_caps;
6137 #endif
6138         struct dc_sink *sink = NULL;
6139
6140         memset(&saved_mode, 0, sizeof(saved_mode));
6141
6142         if (aconnector == NULL) {
6143                 DRM_ERROR("aconnector is NULL!\n");
6144                 return stream;
6145         }
6146
6147         drm_connector = &aconnector->base;
6148
6149         if (!aconnector->dc_sink) {
6150                 sink = create_fake_sink(aconnector);
6151                 if (!sink)
6152                         return stream;
6153         } else {
6154                 sink = aconnector->dc_sink;
6155                 dc_sink_retain(sink);
6156         }
6157
6158         stream = dc_create_stream_for_sink(sink);
6159
6160         if (stream == NULL) {
6161                 DRM_ERROR("Failed to create stream for sink!\n");
6162                 goto finish;
6163         }
6164
6165         stream->dm_stream_context = aconnector;
6166
6167         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6168                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6169
6170         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6171                 /* Search for preferred mode */
6172                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6173                         native_mode_found = true;
6174                         break;
6175                 }
6176         }
6177         if (!native_mode_found)
6178                 preferred_mode = list_first_entry_or_null(
6179                                 &aconnector->base.modes,
6180                                 struct drm_display_mode,
6181                                 head);
6182
6183         mode_refresh = drm_mode_vrefresh(&mode);
6184
6185         if (preferred_mode == NULL) {
6186                 /*
6187                  * This may not be an error: the use case is when we have no
6188                  * usermode calls to reset and set mode upon hotplug. In that
6189                  * case, we call set mode ourselves to restore the previous mode,
6190                  * and the mode list may not be filled in yet.
6191                  */
6192                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6193         } else {
6194                 recalculate_timing = amdgpu_freesync_vid_mode &&
6195                                  is_freesync_video_mode(&mode, aconnector);
6196                 if (recalculate_timing) {
6197                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6198                         saved_mode = mode;
6199                         mode = *freesync_mode;
6200                 } else {
6201                         decide_crtc_timing_for_drm_display_mode(
6202                                 &mode, preferred_mode, scale);
6203
6204                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6205                 }
6206         }
6207
6208         if (recalculate_timing)
6209                 drm_mode_set_crtcinfo(&saved_mode, 0);
6210         else if (!dm_state)
6211                 drm_mode_set_crtcinfo(&mode, 0);
6212
6213         /*
6214          * If scaling is enabled and the refresh rate didn't change,
6215          * copy the vic and sync polarities from the old timing.
6216          */
6217         if (!scale || mode_refresh != preferred_refresh)
6218                 fill_stream_properties_from_drm_display_mode(
6219                         stream, &mode, &aconnector->base, con_state, NULL,
6220                         requested_bpc);
6221         else
6222                 fill_stream_properties_from_drm_display_mode(
6223                         stream, &mode, &aconnector->base, con_state, old_stream,
6224                         requested_bpc);
6225
6226 #if defined(CONFIG_DRM_AMD_DC_DCN)
6227         /* SST DSC determination policy */
6228         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6229         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6230                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6231 #endif
6232
6233         update_stream_scaling_settings(&mode, dm_state, stream);
6234
6235         fill_audio_info(
6236                 &stream->audio_info,
6237                 drm_connector,
6238                 sink);
6239
6240         update_stream_signal(stream, sink);
6241
6242         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6243                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6244
6245         if (stream->link->psr_settings.psr_feature_enabled) {
6246                 /*
6247                  * Decide whether the stream supports VSC SDP colorimetry
6248                  * before building the VSC info packet.
6249                  */
6250                 stream->use_vsc_sdp_for_colorimetry = false;
6251                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6252                         stream->use_vsc_sdp_for_colorimetry =
6253                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6254                 } else {
6255                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6256                                 stream->use_vsc_sdp_for_colorimetry = true;
6257                 }
6258                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6259                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6260
6261         }
6262 finish:
6263         dc_sink_release(sink);
6264
6265         return stream;
6266 }
6267
6268 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6269 {
6270         drm_crtc_cleanup(crtc);
6271         kfree(crtc);
6272 }
6273
6274 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6275                                   struct drm_crtc_state *state)
6276 {
6277         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6278
6279         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6280         if (cur->stream)
6281                 dc_stream_release(cur->stream);
6282
6284         __drm_atomic_helper_crtc_destroy_state(state);
6285
6287         kfree(state);
6288 }
6289
6290 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6291 {
6292         struct dm_crtc_state *state;
6293
6294         if (crtc->state)
6295                 dm_crtc_destroy_state(crtc, crtc->state);
6296
6297         state = kzalloc(sizeof(*state), GFP_KERNEL);
6298         if (WARN_ON(!state))
6299                 return;
6300
6301         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6302 }
6303
6304 static struct drm_crtc_state *
6305 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6306 {
6307         struct dm_crtc_state *state, *cur;
6308
6309         if (WARN_ON(!crtc->state))
6310                 return NULL;
6311
6312         cur = to_dm_crtc_state(crtc->state);
6313
6314         state = kzalloc(sizeof(*state), GFP_KERNEL);
6315         if (!state)
6316                 return NULL;
6317
6318         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6319
6320         if (cur->stream) {
6321                 state->stream = cur->stream;
6322                 dc_stream_retain(state->stream);
6323         }
6324
6325         state->active_planes = cur->active_planes;
6326         state->vrr_infopacket = cur->vrr_infopacket;
6327         state->abm_level = cur->abm_level;
6328         state->vrr_supported = cur->vrr_supported;
6329         state->freesync_config = cur->freesync_config;
6330         state->cm_has_degamma = cur->cm_has_degamma;
6331         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6332         state->force_dpms_off = cur->force_dpms_off;
6333         /* TODO: Duplicate dc_stream once the stream object is flattened */
6334
6335         return &state->base;
6336 }
6337
6338 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6339 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6340 {
6341         crtc_debugfs_init(crtc);
6342
6343         return 0;
6344 }
6345 #endif
6346
6347 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6348 {
6349         enum dc_irq_source irq_source;
6350         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6351         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6352         int rc;
6353
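        /* VUPDATE is only needed alongside vblank when VRR is active (see dm_set_vblank). */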
6354         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6355
6356         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6357
6358         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6359                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6360         return rc;
6361 }
6362
6363 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6364 {
6365         enum dc_irq_source irq_source;
6366         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6367         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6368         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6369 #if defined(CONFIG_DRM_AMD_DC_DCN)
6370         struct amdgpu_display_manager *dm = &adev->dm;
6371         struct vblank_control_work *work;
6372 #endif
6373         int rc = 0;
6374
6375         if (enable) {
6376                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6377                 if (amdgpu_dm_vrr_active(acrtc_state))
6378                         rc = dm_set_vupdate_irq(crtc, true);
6379         } else {
6380                 /* vblank irq off -> vupdate irq off */
6381                 rc = dm_set_vupdate_irq(crtc, false);
6382         }
6383
6384         if (rc)
6385                 return rc;
6386
6387         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6388
6389         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6390                 return -EBUSY;
6391
6392         if (amdgpu_in_reset(adev))
6393                 return 0;
6394
6395 #if defined(CONFIG_DRM_AMD_DC_DCN)
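        /*
         * Stream-dependent vblank work is deferred to a worker that is
         * allowed to sleep; GFP_ATOMIC is used since this path may run in
         * atomic context. The worker releases the stream reference taken
         * below (based on the retain here; see vblank_control_worker).
         */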
6396         if (dm->vblank_control_workqueue) {
6397                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6398                 if (!work)
6399                         return -ENOMEM;
6400
6401                 INIT_WORK(&work->work, vblank_control_worker);
6402                 work->dm = dm;
6403                 work->acrtc = acrtc;
6404                 work->enable = enable;
6405
6406                 if (acrtc_state->stream) {
6407                         dc_stream_retain(acrtc_state->stream);
6408                         work->stream = acrtc_state->stream;
6409                 }
6410
6411                 queue_work(dm->vblank_control_workqueue, &work->work);
6412         }
6413 #endif
6414
6415         return 0;
6416 }
6417
6418 static int dm_enable_vblank(struct drm_crtc *crtc)
6419 {
6420         return dm_set_vblank(crtc, true);
6421 }
6422
6423 static void dm_disable_vblank(struct drm_crtc *crtc)
6424 {
6425         dm_set_vblank(crtc, false);
6426 }
6427
6428 /* Implemented only the options currently available for the driver */
6429 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6430         .reset = dm_crtc_reset_state,
6431         .destroy = amdgpu_dm_crtc_destroy,
6432         .set_config = drm_atomic_helper_set_config,
6433         .page_flip = drm_atomic_helper_page_flip,
6434         .atomic_duplicate_state = dm_crtc_duplicate_state,
6435         .atomic_destroy_state = dm_crtc_destroy_state,
6436         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6437         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6438         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6439         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6440         .enable_vblank = dm_enable_vblank,
6441         .disable_vblank = dm_disable_vblank,
6442         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6443 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6444         .late_register = amdgpu_dm_crtc_late_register,
6445 #endif
6446 };
6447
6448 static enum drm_connector_status
6449 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6450 {
6451         bool connected;
6452         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6453
6454         /*
6455          * Notes:
6456          * 1. This interface is NOT called in context of HPD irq.
6457          * 2. This interface *is called* in the context of a user-mode ioctl,
6458          * which makes it a bad place for *any* MST-related activity.
6459          */
6460
6461         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6462             !aconnector->fake_enable)
6463                 connected = (aconnector->dc_sink != NULL);
6464         else
6465                 connected = (aconnector->base.force == DRM_FORCE_ON);
6466
6467         update_subconnector_property(aconnector);
6468
6469         return (connected ? connector_status_connected :
6470                         connector_status_disconnected);
6471 }
6472
6473 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6474                                             struct drm_connector_state *connector_state,
6475                                             struct drm_property *property,
6476                                             uint64_t val)
6477 {
6478         struct drm_device *dev = connector->dev;
6479         struct amdgpu_device *adev = drm_to_adev(dev);
6480         struct dm_connector_state *dm_old_state =
6481                 to_dm_connector_state(connector->state);
6482         struct dm_connector_state *dm_new_state =
6483                 to_dm_connector_state(connector_state);
6484
6485         int ret = -EINVAL;
6486
6487         if (property == dev->mode_config.scaling_mode_property) {
6488                 enum amdgpu_rmx_type rmx_type;
6489
6490                 switch (val) {
6491                 case DRM_MODE_SCALE_CENTER:
6492                         rmx_type = RMX_CENTER;
6493                         break;
6494                 case DRM_MODE_SCALE_ASPECT:
6495                         rmx_type = RMX_ASPECT;
6496                         break;
6497                 case DRM_MODE_SCALE_FULLSCREEN:
6498                         rmx_type = RMX_FULL;
6499                         break;
6500                 case DRM_MODE_SCALE_NONE:
6501                 default:
6502                         rmx_type = RMX_OFF;
6503                         break;
6504                 }
6505
6506                 if (dm_old_state->scaling == rmx_type)
6507                         return 0;
6508
6509                 dm_new_state->scaling = rmx_type;
6510                 ret = 0;
6511         } else if (property == adev->mode_info.underscan_hborder_property) {
6512                 dm_new_state->underscan_hborder = val;
6513                 ret = 0;
6514         } else if (property == adev->mode_info.underscan_vborder_property) {
6515                 dm_new_state->underscan_vborder = val;
6516                 ret = 0;
6517         } else if (property == adev->mode_info.underscan_property) {
6518                 dm_new_state->underscan_enable = val;
6519                 ret = 0;
6520         } else if (property == adev->mode_info.abm_level_property) {
6521                 dm_new_state->abm_level = val;
6522                 ret = 0;
6523         }
6524
6525         return ret;
6526 }
6527
6528 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6529                                             const struct drm_connector_state *state,
6530                                             struct drm_property *property,
6531                                             uint64_t *val)
6532 {
6533         struct drm_device *dev = connector->dev;
6534         struct amdgpu_device *adev = drm_to_adev(dev);
6535         struct dm_connector_state *dm_state =
6536                 to_dm_connector_state(state);
6537         int ret = -EINVAL;
6538
6539         if (property == dev->mode_config.scaling_mode_property) {
6540                 switch (dm_state->scaling) {
6541                 case RMX_CENTER:
6542                         *val = DRM_MODE_SCALE_CENTER;
6543                         break;
6544                 case RMX_ASPECT:
6545                         *val = DRM_MODE_SCALE_ASPECT;
6546                         break;
6547                 case RMX_FULL:
6548                         *val = DRM_MODE_SCALE_FULLSCREEN;
6549                         break;
6550                 case RMX_OFF:
6551                 default:
6552                         *val = DRM_MODE_SCALE_NONE;
6553                         break;
6554                 }
6555                 ret = 0;
6556         } else if (property == adev->mode_info.underscan_hborder_property) {
6557                 *val = dm_state->underscan_hborder;
6558                 ret = 0;
6559         } else if (property == adev->mode_info.underscan_vborder_property) {
6560                 *val = dm_state->underscan_vborder;
6561                 ret = 0;
6562         } else if (property == adev->mode_info.underscan_property) {
6563                 *val = dm_state->underscan_enable;
6564                 ret = 0;
6565         } else if (property == adev->mode_info.abm_level_property) {
6566                 *val = dm_state->abm_level;
6567                 ret = 0;
6568         }
6569
6570         return ret;
6571 }
6572
6573 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6574 {
6575         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6576
6577         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6578 }
6579
6580 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6581 {
6582         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6583         const struct dc_link *link = aconnector->dc_link;
6584         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6585         struct amdgpu_display_manager *dm = &adev->dm;
6586         int i;
6587
6588         /*
6589          * Call only if mst_mgr was initialized before, since it's not done
6590          * for all connector types.
6591          */
6592         if (aconnector->mst_mgr.dev)
6593                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6594
6595 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6596         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6597         for (i = 0; i < dm->num_of_edps; i++) {
6598                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6599                         backlight_device_unregister(dm->backlight_dev[i]);
6600                         dm->backlight_dev[i] = NULL;
6601                 }
6602         }
6603 #endif
6604
6605         if (aconnector->dc_em_sink)
6606                 dc_sink_release(aconnector->dc_em_sink);
6607         aconnector->dc_em_sink = NULL;
6608         if (aconnector->dc_sink)
6609                 dc_sink_release(aconnector->dc_sink);
6610         aconnector->dc_sink = NULL;
6611
6612         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6613         drm_connector_unregister(connector);
6614         drm_connector_cleanup(connector);
6615         if (aconnector->i2c) {
6616                 i2c_del_adapter(&aconnector->i2c->base);
6617                 kfree(aconnector->i2c);
6618         }
6619         kfree(aconnector->dm_dp_aux.aux.name);
6620
6621         kfree(connector);
6622 }
6623
6624 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6625 {
6626         struct dm_connector_state *state =
6627                 to_dm_connector_state(connector->state);
6628
6629         if (connector->state)
6630                 __drm_atomic_helper_connector_destroy_state(connector->state);
6631
6632         kfree(state);
6633
6634         state = kzalloc(sizeof(*state), GFP_KERNEL);
6635
6636         if (state) {
6637                 state->scaling = RMX_OFF;
6638                 state->underscan_enable = false;
6639                 state->underscan_hborder = 0;
6640                 state->underscan_vborder = 0;
6641                 state->base.max_requested_bpc = 8;
6642                 state->vcpi_slots = 0;
6643                 state->pbn = 0;
6644                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6645                         state->abm_level = amdgpu_dm_abm_level;
6646
6647                 __drm_atomic_helper_connector_reset(connector, &state->base);
6648         }
6649 }
6650
6651 struct drm_connector_state *
6652 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6653 {
6654         struct dm_connector_state *state =
6655                 to_dm_connector_state(connector->state);
6656
6657         struct dm_connector_state *new_state =
6658                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6659
6660         if (!new_state)
6661                 return NULL;
6662
6663         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6664
6665         new_state->freesync_capable = state->freesync_capable;
6666         new_state->abm_level = state->abm_level;
6667         new_state->scaling = state->scaling;
6668         new_state->underscan_enable = state->underscan_enable;
6669         new_state->underscan_hborder = state->underscan_hborder;
6670         new_state->underscan_vborder = state->underscan_vborder;
6671         new_state->vcpi_slots = state->vcpi_slots;
6672         new_state->pbn = state->pbn;
6673         return &new_state->base;
6674 }
6675
6676 static int
6677 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6678 {
6679         struct amdgpu_dm_connector *amdgpu_dm_connector =
6680                 to_amdgpu_dm_connector(connector);
6681         int r;
6682
6683         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6684             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6685                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6686                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6687                 if (r)
6688                         return r;
6689         }
6690
6691 #if defined(CONFIG_DEBUG_FS)
6692         connector_debugfs_init(amdgpu_dm_connector);
6693 #endif
6694
6695         return 0;
6696 }
6697
6698 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6699         .reset = amdgpu_dm_connector_funcs_reset,
6700         .detect = amdgpu_dm_connector_detect,
6701         .fill_modes = drm_helper_probe_single_connector_modes,
6702         .destroy = amdgpu_dm_connector_destroy,
6703         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6704         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6705         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6706         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6707         .late_register = amdgpu_dm_connector_late_register,
6708         .early_unregister = amdgpu_dm_connector_unregister
6709 };
6710
6711 static int get_modes(struct drm_connector *connector)
6712 {
6713         return amdgpu_dm_connector_get_modes(connector);
6714 }
6715
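/*
 * Build an emulated DC sink (SIGNAL_TYPE_VIRTUAL) from the connector's
 * overridden EDID property blob, so a stream can be created and validated
 * without a physically attached display. If no EDID blob is present the
 * connector is forced off instead.
 */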
6716 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6717 {
6718         struct dc_sink_init_data init_params = {
6719                         .link = aconnector->dc_link,
6720                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6721         };
6722         struct edid *edid;
6723
6724         if (!aconnector->base.edid_blob_ptr) {
6725                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6726                                 aconnector->base.name);
6727
6728                 aconnector->base.force = DRM_FORCE_OFF;
6729                 aconnector->base.override_edid = false;
6730                 return;
6731         }
6732
6733         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6734
6735         aconnector->edid = edid;
6736
6737         aconnector->dc_em_sink = dc_link_add_remote_sink(
6738                 aconnector->dc_link,
6739                 (uint8_t *)edid,
6740                 (edid->extensions + 1) * EDID_LENGTH,
6741                 &init_params);
6742
6743         if (aconnector->base.force == DRM_FORCE_ON) {
6744                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6745                                       aconnector->dc_link->local_sink :
6746                                       aconnector->dc_em_sink;
6747                 dc_sink_retain(aconnector->dc_sink);
6748         }
6749 }
6750
6751 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6752 {
6753         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6754
6755         /*
6756          * In case of a headless boot with force on for a DP managed connector,
6757          * those settings have to be != 0 to get an initial modeset.
6758          */
6759         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6760                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6761                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6762         }
6763
6765         aconnector->base.override_edid = true;
6766         create_eml_sink(aconnector);
6767 }
6768
6769 static struct dc_stream_state *
6770 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6771                                 const struct drm_display_mode *drm_mode,
6772                                 const struct dm_connector_state *dm_state,
6773                                 const struct dc_stream_state *old_stream)
6774 {
6775         struct drm_connector *connector = &aconnector->base;
6776         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6777         struct dc_stream_state *stream;
6778         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6779         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6780         enum dc_status dc_result = DC_OK;
6781
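        /*
         * Try the stream at the requested bpc first and, if DC rejects it,
         * step the color depth down by 2 bpc at a time (e.g. 10 -> 8 -> 6)
         * until validation succeeds or we drop below 6 bpc.
         */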
6782         do {
6783                 stream = create_stream_for_sink(aconnector, drm_mode,
6784                                                 dm_state, old_stream,
6785                                                 requested_bpc);
6786                 if (stream == NULL) {
6787                         DRM_ERROR("Failed to create stream for sink!\n");
6788                         break;
6789                 }
6790
6791                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6792
6793                 if (dc_result != DC_OK) {
6794                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6795                                       drm_mode->hdisplay,
6796                                       drm_mode->vdisplay,
6797                                       drm_mode->clock,
6798                                       dc_result,
6799                                       dc_status_to_str(dc_result));
6800
6801                         dc_stream_release(stream);
6802                         stream = NULL;
6803                         requested_bpc -= 2; /* lower bpc to retry validation */
6804                 }
6805
6806         } while (stream == NULL && requested_bpc >= 6);
6807
6808         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6809                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6810
6811                 aconnector->force_yuv420_output = true;
6812                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6813                                                 dm_state, old_stream);
6814                 aconnector->force_yuv420_output = false;
6815         }
6816
6817         return stream;
6818 }
6819
6820 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6821                                    struct drm_display_mode *mode)
6822 {
6823         int result = MODE_ERROR;
6824         struct dc_sink *dc_sink;
6825         /* TODO: Unhardcode stream count */
6826         struct dc_stream_state *stream;
6827         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6828
6829         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6830                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6831                 return result;
6832
6833         /*
6834          * Only run this the first time mode_valid is called to initialize
6835          * EDID mgmt.
6836          */
6837         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6838                 !aconnector->dc_em_sink)
6839                 handle_edid_mgmt(aconnector);
6840
6841         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6842
6843         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6844                                 aconnector->base.force != DRM_FORCE_ON) {
6845                 DRM_ERROR("dc_sink is NULL!\n");
6846                 goto fail;
6847         }
6848
6849         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6850         if (stream) {
6851                 dc_stream_release(stream);
6852                 result = MODE_OK;
6853         }
6854
6855 fail:
6856         /* TODO: error handling */
6857         return result;
6858 }
6859
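/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: hdmi_drm_infoframe_pack_only() yields a 4 byte header plus a
 * 26 byte payload, which is wrapped either as an HDMI infoframe
 * (type 0x87) or as a DP/eDP SDP, depending on the connector type.
 */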
6860 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6861                                 struct dc_info_packet *out)
6862 {
6863         struct hdmi_drm_infoframe frame;
6864         unsigned char buf[30]; /* 26 + 4 */
6865         ssize_t len;
6866         int ret, i;
6867
6868         memset(out, 0, sizeof(*out));
6869
6870         if (!state->hdr_output_metadata)
6871                 return 0;
6872
6873         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6874         if (ret)
6875                 return ret;
6876
6877         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6878         if (len < 0)
6879                 return (int)len;
6880
6881         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6882         if (len != 30)
6883                 return -EINVAL;
6884
6885         /* Prepare the infopacket for DC. */
6886         switch (state->connector->connector_type) {
6887         case DRM_MODE_CONNECTOR_HDMIA:
6888                 out->hb0 = 0x87; /* type */
6889                 out->hb1 = 0x01; /* version */
6890                 out->hb2 = 0x1A; /* length */
6891                 out->sb[0] = buf[3]; /* checksum */
6892                 i = 1;
6893                 break;
6894
6895         case DRM_MODE_CONNECTOR_DisplayPort:
6896         case DRM_MODE_CONNECTOR_eDP:
6897                 out->hb0 = 0x00; /* sdp id, zero */
6898                 out->hb1 = 0x87; /* type */
6899                 out->hb2 = 0x1D; /* payload len - 1 */
6900                 out->hb3 = (0x13 << 2); /* sdp version */
6901                 out->sb[0] = 0x01; /* version */
6902                 out->sb[1] = 0x1A; /* length */
6903                 i = 2;
6904                 break;
6905
6906         default:
6907                 return -EINVAL;
6908         }
6909
6910         memcpy(&out->sb[i], &buf[4], 26);
6911         out->valid = true;
6912
6913         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6914                        sizeof(out->sb), false);
6915
6916         return 0;
6917 }
6918
6919 static int
6920 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6921                                  struct drm_atomic_state *state)
6922 {
6923         struct drm_connector_state *new_con_state =
6924                 drm_atomic_get_new_connector_state(state, conn);
6925         struct drm_connector_state *old_con_state =
6926                 drm_atomic_get_old_connector_state(state, conn);
6927         struct drm_crtc *crtc = new_con_state->crtc;
6928         struct drm_crtc_state *new_crtc_state;
6929         int ret;
6930
6931         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6932
6933         if (!crtc)
6934                 return 0;
6935
6936         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6937                 struct dc_info_packet hdr_infopacket;
6938
6939                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6940                 if (ret)
6941                         return ret;
6942
6943                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6944                 if (IS_ERR(new_crtc_state))
6945                         return PTR_ERR(new_crtc_state);
6946
6947                 /*
6948                  * DC considers the stream backends changed if the
6949                  * static metadata changes. Forcing the modeset also
6950                  * gives a simple way for userspace to switch from
6951                  * 8bpc to 10bpc when setting the metadata to enter
6952                  * or exit HDR.
6953                  *
6954                  * Changing the static metadata after it's been
6955                  * set is permissible, however. So only force a
6956                  * modeset if we're entering or exiting HDR.
6957                  */
6958                 new_crtc_state->mode_changed =
6959                         !old_con_state->hdr_output_metadata ||
6960                         !new_con_state->hdr_output_metadata;
6961         }
6962
6963         return 0;
6964 }
6965
6966 static const struct drm_connector_helper_funcs
6967 amdgpu_dm_connector_helper_funcs = {
6968         /*
6969          * If hotplugging a second bigger display in FB Con mode, bigger resolution
6970          * modes will be filtered by drm_mode_validate_size(), and those modes
6971          * are missing after the user starts lightdm. So we need to renew the
6972          * modes list in the get_modes callback, not just return the modes count.
6973          */
6974         .get_modes = get_modes,
6975         .mode_valid = amdgpu_dm_connector_mode_valid,
6976         .atomic_check = amdgpu_dm_connector_atomic_check,
6977 };
6978
6979 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6980 {
6981 }
6982
6983 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6984 {
6985         struct drm_atomic_state *state = new_crtc_state->state;
6986         struct drm_plane *plane;
6987         int num_active = 0;
6988
6989         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6990                 struct drm_plane_state *new_plane_state;
6991
6992                 /* Cursor planes are "fake". */
6993                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6994                         continue;
6995
6996                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6997
6998                 if (!new_plane_state) {
6999                         /*
7000                          * The plane is enabled on the CRTC and hasn't changed
7001                          * state. This means that it previously passed
7002                          * validation and is therefore enabled.
7003                          */
7004                         num_active += 1;
7005                         continue;
7006                 }
7007
7008                 /* We need a framebuffer to be considered enabled. */
7009                 num_active += (new_plane_state->fb != NULL);
7010         }
7011
7012         return num_active;
7013 }
7014
7015 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7016                                          struct drm_crtc_state *new_crtc_state)
7017 {
7018         struct dm_crtc_state *dm_new_crtc_state =
7019                 to_dm_crtc_state(new_crtc_state);
7020
7021         dm_new_crtc_state->active_planes = 0;
7022
7023         if (!dm_new_crtc_state->stream)
7024                 return;
7025
7026         dm_new_crtc_state->active_planes =
7027                 count_crtc_active_planes(new_crtc_state);
7028 }
7029
7030 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7031                                        struct drm_atomic_state *state)
7032 {
7033         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7034                                                                           crtc);
7035         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7036         struct dc *dc = adev->dm.dc;
7037         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7038         int ret = -EINVAL;
7039
7040         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7041
7042         dm_update_crtc_active_planes(crtc, crtc_state);
7043
7044         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7045                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7046                 return ret;
7047         }
7048
7049         /*
7050          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7051          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7052          * planes are disabled, which is not supported by the hardware. And there is legacy
7053          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7054          */
7055         if (crtc_state->enable &&
7056             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7057                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7058                 return -EINVAL;
7059         }
7060
7061         /* In some use cases, like reset, no stream is attached */
7062         if (!dm_crtc_state->stream)
7063                 return 0;
7064
7065         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7066                 return 0;
7067
7068         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7069         return ret;
7070 }
7071
7072 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7073                                       const struct drm_display_mode *mode,
7074                                       struct drm_display_mode *adjusted_mode)
7075 {
7076         return true;
7077 }
7078
7079 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7080         .disable = dm_crtc_helper_disable,
7081         .atomic_check = dm_crtc_helper_atomic_check,
7082         .mode_fixup = dm_crtc_helper_mode_fixup,
7083         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7084 };
7085
7086 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7087 {
7089 }
7090
7091 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7092 {
7093         switch (display_color_depth) {
7094         case COLOR_DEPTH_666:
7095                 return 6;
7096         case COLOR_DEPTH_888:
7097                 return 8;
7098         case COLOR_DEPTH_101010:
7099                 return 10;
7100         case COLOR_DEPTH_121212:
7101                 return 12;
7102         case COLOR_DEPTH_141414:
7103                 return 14;
7104         case COLOR_DEPTH_161616:
7105                 return 16;
7106         default:
7107                 break;
7108         }
7109         return 0;
7110 }
7111
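/*
 * Encoder atomic check for MST connectors: derive the bandwidth the mode
 * needs in PBN units and atomically claim the matching number of VCPI
 * time slots from the MST topology manager. Connectors without an MST
 * port are a no-op.
 */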
7112 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7113                                           struct drm_crtc_state *crtc_state,
7114                                           struct drm_connector_state *conn_state)
7115 {
7116         struct drm_atomic_state *state = crtc_state->state;
7117         struct drm_connector *connector = conn_state->connector;
7118         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7119         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7120         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7121         struct drm_dp_mst_topology_mgr *mst_mgr;
7122         struct drm_dp_mst_port *mst_port;
7123         enum dc_color_depth color_depth;
7124         int clock, bpp = 0;
7125         bool is_y420 = false;
7126
7127         if (!aconnector->port || !aconnector->dc_sink)
7128                 return 0;
7129
7130         mst_port = aconnector->port;
7131         mst_mgr = &aconnector->mst_port->mst_mgr;
7132
7133         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7134                 return 0;
7135
7136         if (!state->duplicated) {
7137                 int max_bpc = conn_state->max_requested_bpc;
7138                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7139                                 aconnector->force_yuv420_output;
7140                 color_depth = convert_color_depth_from_display_info(connector,
7141                                                                     is_y420,
7142                                                                     max_bpc);
7143                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7144                 clock = adjusted_mode->clock;
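                /*
                 * drm_dp_calc_pbn_mode() converts dotclock and bpp into DP
                 * MST Payload Bandwidth Number units (54/64 MB/s slots,
                 * including the standard 1.006 margin). Illustrative
                 * example: 1920x1080@60 (148500 kHz) at 24 bpp is
                 * 445.5 MB/s * 64/54 * 1.006 ~= 531.2, i.e. 532 PBN.
                 */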
7145                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7146         }
7147         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7148                                                                            mst_mgr,
7149                                                                            mst_port,
7150                                                                            dm_new_connector_state->pbn,
7151                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7152         if (dm_new_connector_state->vcpi_slots < 0) {
7153                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7154                 return dm_new_connector_state->vcpi_slots;
7155         }
7156         return 0;
7157 }
7158
7159 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7160         .disable = dm_encoder_helper_disable,
7161         .atomic_check = dm_encoder_helper_atomic_check
7162 };
7163
7164 #if defined(CONFIG_DRM_AMD_DC_DCN)
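/*
 * For every MST connector in the atomic state, find the matching DC
 * stream and reprogram DSC on its MST port: streams without DSC in their
 * timing get DSC disabled, while DSC streams claim VCPI slots for the PBN
 * that compute_mst_dsc_configs_for_state() stored in vars[].
 */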
7165 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7166                                             struct dc_state *dc_state,
7167                                             struct dsc_mst_fairness_vars *vars)
7168 {
7169         struct dc_stream_state *stream = NULL;
7170         struct drm_connector *connector;
7171         struct drm_connector_state *new_con_state;
7172         struct amdgpu_dm_connector *aconnector;
7173         struct dm_connector_state *dm_conn_state;
7174         int i, j, clock;
7175         int vcpi, pbn_div, pbn = 0;
7176
7177         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7178
7179                 aconnector = to_amdgpu_dm_connector(connector);
7180
7181                 if (!aconnector->port)
7182                         continue;
7183
7184                 if (!new_con_state || !new_con_state->crtc)
7185                         continue;
7186
7187                 dm_conn_state = to_dm_connector_state(new_con_state);
7188
7189                 for (j = 0; j < dc_state->stream_count; j++) {
7190                         stream = dc_state->streams[j];
7191                         if (!stream)
7192                                 continue;
7193
7194                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7195                                 break;
7196
7197                         stream = NULL;
7198                 }
7199
7200                 if (!stream)
7201                         continue;
7202
7203                 if (stream->timing.flags.DSC != 1) {
7204                         drm_dp_mst_atomic_enable_dsc(state,
7205                                                      aconnector->port,
7206                                                      dm_conn_state->pbn,
7207                                                      0,
7208                                                      false);
7209                         continue;
7210                 }
7211
7212                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7213                 clock = stream->timing.pix_clk_100hz / 10;
7214                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7215                 for (j = 0; j < dc_state->stream_count; j++) {
7216                         if (vars[j].aconnector == aconnector) {
7217                                 pbn = vars[j].pbn;
7218                                 break;
7219                         }
7220                 }
7221
7222                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7223                                                     aconnector->port,
7224                                                     pbn, pbn_div,
7225                                                     true);
7226                 if (vcpi < 0)
7227                         return vcpi;
7228
7229                 dm_conn_state->pbn = pbn;
7230                 dm_conn_state->vcpi_slots = vcpi;
7231         }
7232         return 0;
7233 }
7234 #endif
7235
7236 static void dm_drm_plane_reset(struct drm_plane *plane)
7237 {
7238         struct dm_plane_state *amdgpu_state = NULL;
7239
7240         if (plane->state)
7241                 plane->funcs->atomic_destroy_state(plane, plane->state);
7242
7243         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7244         WARN_ON(amdgpu_state == NULL);
7245
7246         if (amdgpu_state)
7247                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7248 }
7249
7250 static struct drm_plane_state *
7251 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7252 {
7253         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7254
7255         old_dm_plane_state = to_dm_plane_state(plane->state);
7256         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7257         if (!dm_plane_state)
7258                 return NULL;
7259
7260         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7261
7262         if (old_dm_plane_state->dc_state) {
7263                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7264                 dc_plane_state_retain(dm_plane_state->dc_state);
7265         }
7266
7267         return &dm_plane_state->base;
7268 }
7269
7270 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7271                                 struct drm_plane_state *state)
7272 {
7273         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7274
7275         if (dm_plane_state->dc_state)
7276                 dc_plane_state_release(dm_plane_state->dc_state);
7277
7278         drm_atomic_helper_plane_destroy_state(plane, state);
7279 }
7280
7281 static const struct drm_plane_funcs dm_plane_funcs = {
7282         .update_plane   = drm_atomic_helper_update_plane,
7283         .disable_plane  = drm_atomic_helper_disable_plane,
7284         .destroy        = drm_primary_helper_destroy,
7285         .reset = dm_drm_plane_reset,
7286         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7287         .atomic_destroy_state = dm_drm_plane_destroy_state,
7288         .format_mod_supported = dm_plane_format_mod_supported,
7289 };
7290
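/*
 * prepare_fb hook: reserve and pin the BO backing the new framebuffer
 * (VRAM for cursors, any display-capable domain otherwise), bind it into
 * GART, record the resulting GPU address in the amdgpu_framebuffer and,
 * for newly created planes only, fill the DC buffer attributes.
 */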
7291 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7292                                       struct drm_plane_state *new_state)
7293 {
7294         struct amdgpu_framebuffer *afb;
7295         struct drm_gem_object *obj;
7296         struct amdgpu_device *adev;
7297         struct amdgpu_bo *rbo;
7298         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7299         struct list_head list;
7300         struct ttm_validate_buffer tv;
7301         struct ww_acquire_ctx ticket;
7302         uint32_t domain;
7303         int r;
7304
7305         if (!new_state->fb) {
7306                 DRM_DEBUG_KMS("No FB bound\n");
7307                 return 0;
7308         }
7309
7310         afb = to_amdgpu_framebuffer(new_state->fb);
7311         obj = new_state->fb->obj[0];
7312         rbo = gem_to_amdgpu_bo(obj);
7313         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7314         INIT_LIST_HEAD(&list);
7315
7316         tv.bo = &rbo->tbo;
7317         tv.num_shared = 1;
7318         list_add(&tv.head, &list);
7319
7320         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7321         if (r) {
7322                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7323                 return r;
7324         }
7325
7326         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7327                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7328         else
7329                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7330
7331         r = amdgpu_bo_pin(rbo, domain);
7332         if (unlikely(r != 0)) {
7333                 if (r != -ERESTARTSYS)
7334                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7335                 ttm_eu_backoff_reservation(&ticket, &list);
7336                 return r;
7337         }
7338
7339         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7340         if (unlikely(r != 0)) {
7341                 amdgpu_bo_unpin(rbo);
7342                 ttm_eu_backoff_reservation(&ticket, &list);
7343                 DRM_ERROR("%p bind failed\n", rbo);
7344                 return r;
7345         }
7346
7347         ttm_eu_backoff_reservation(&ticket, &list);
7348
7349         afb->address = amdgpu_bo_gpu_offset(rbo);
7350
7351         amdgpu_bo_ref(rbo);
7352
7353         /*
7354          * We don't do surface updates on planes that have been newly created,
7355          * but we also don't have the afb->address during atomic check.
7356          *
7357          * Fill in buffer attributes depending on the address here, but only on
7358          * newly created planes since they're not being used by DC yet and this
7359          * won't modify global state.
7360          */
7361         dm_plane_state_old = to_dm_plane_state(plane->state);
7362         dm_plane_state_new = to_dm_plane_state(new_state);
7363
7364         if (dm_plane_state_new->dc_state &&
7365             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7366                 struct dc_plane_state *plane_state =
7367                         dm_plane_state_new->dc_state;
7368                 bool force_disable_dcc = !plane_state->dcc.enable;
7369
7370                 fill_plane_buffer_attributes(
7371                         adev, afb, plane_state->format, plane_state->rotation,
7372                         afb->tiling_flags,
7373                         &plane_state->tiling_info, &plane_state->plane_size,
7374                         &plane_state->dcc, &plane_state->address,
7375                         afb->tmz_surface, force_disable_dcc);
7376         }
7377
7378         return 0;
7379 }
7380
7381 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7382                                        struct drm_plane_state *old_state)
7383 {
7384         struct amdgpu_bo *rbo;
7385         int r;
7386
7387         if (!old_state->fb)
7388                 return;
7389
7390         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7391         r = amdgpu_bo_reserve(rbo, false);
7392         if (unlikely(r)) {
7393                 DRM_ERROR("failed to reserve rbo before unpin\n");
7394                 return;
7395         }
7396
7397         amdgpu_bo_unpin(rbo);
7398         amdgpu_bo_unreserve(rbo);
7399         amdgpu_bo_unref(&rbo);
7400 }
7401
7402 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7403                                        struct drm_crtc_state *new_crtc_state)
7404 {
7405         struct drm_framebuffer *fb = state->fb;
7406         int min_downscale, max_upscale;
7407         int min_scale = 0;
7408         int max_scale = INT_MAX;
7409
7410         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7411         if (fb && state->crtc) {
7412                 /* Validate viewport to cover the case when only the position changes */
7413                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7414                         int viewport_width = state->crtc_w;
7415                         int viewport_height = state->crtc_h;
7416
7417                         if (state->crtc_x < 0)
7418                                 viewport_width += state->crtc_x;
7419                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7420                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7421
7422                         if (state->crtc_y < 0)
7423                                 viewport_height += state->crtc_y;
7424                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7425                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7426
7427                         if (viewport_width < 0 || viewport_height < 0) {
7428                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7429                                 return -EINVAL;
7430                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7431                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7432                                 return -EINVAL;
7433                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7434                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7435                                 return -EINVAL;
7436                         }
7437
7438                 }
7439
7440                 /* Get min/max allowed scaling factors from plane caps. */
7441                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7442                                              &min_downscale, &max_upscale);
7443                 /*
7444                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7445                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7446                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7447                  */
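                /*
                 * Illustrative numbers: with max_upscale = 16000 (16x) and
                 * min_downscale = 250 (1/4x) this yields
                 *   min_scale = (1000 << 16) / 16000 =   4096 (1/16 in 16.16)
                 *   max_scale = (1000 << 16) /   250 = 262144 (4.0  in 16.16)
                 */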
7448                 min_scale = (1000 << 16) / max_upscale;
7449                 max_scale = (1000 << 16) / min_downscale;
7450         }
7451
7452         return drm_atomic_helper_check_plane_state(
7453                 state, new_crtc_state, min_scale, max_scale, true, true);
7454 }
7455
7456 static int dm_plane_atomic_check(struct drm_plane *plane,
7457                                  struct drm_atomic_state *state)
7458 {
7459         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7460                                                                                  plane);
7461         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7462         struct dc *dc = adev->dm.dc;
7463         struct dm_plane_state *dm_plane_state;
7464         struct dc_scaling_info scaling_info;
7465         struct drm_crtc_state *new_crtc_state;
7466         int ret;
7467
7468         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7469
7470         dm_plane_state = to_dm_plane_state(new_plane_state);
7471
7472         if (!dm_plane_state->dc_state)
7473                 return 0;
7474
7475         new_crtc_state =
7476                 drm_atomic_get_new_crtc_state(state,
7477                                               new_plane_state->crtc);
7478         if (!new_crtc_state)
7479                 return -EINVAL;
7480
7481         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7482         if (ret)
7483                 return ret;
7484
7485         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7486         if (ret)
7487                 return ret;
7488
7489         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7490                 return 0;
7491
7492         return -EINVAL;
7493 }
7494
7495 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7496                                        struct drm_atomic_state *state)
7497 {
7498         /* Only support async updates on cursor planes. */
7499         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7500                 return -EINVAL;
7501
7502         return 0;
7503 }
7504
7505 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7506                                          struct drm_atomic_state *state)
7507 {
7508         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7509                                                                            plane);
7510         struct drm_plane_state *old_state =
7511                 drm_atomic_get_old_plane_state(state, plane);
7512
7513         trace_amdgpu_dm_atomic_update_cursor(new_state);
7514
7515         swap(plane->state->fb, new_state->fb);
7516
7517         plane->state->src_x = new_state->src_x;
7518         plane->state->src_y = new_state->src_y;
7519         plane->state->src_w = new_state->src_w;
7520         plane->state->src_h = new_state->src_h;
7521         plane->state->crtc_x = new_state->crtc_x;
7522         plane->state->crtc_y = new_state->crtc_y;
7523         plane->state->crtc_w = new_state->crtc_w;
7524         plane->state->crtc_h = new_state->crtc_h;
7525
7526         handle_cursor_update(plane, old_state);
7527 }
7528
7529 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7530         .prepare_fb = dm_plane_helper_prepare_fb,
7531         .cleanup_fb = dm_plane_helper_cleanup_fb,
7532         .atomic_check = dm_plane_atomic_check,
7533         .atomic_async_check = dm_plane_atomic_async_check,
7534         .atomic_async_update = dm_plane_atomic_async_update
7535 };
7536
7537 /*
7538  * TODO: these are currently initialized to rgb formats only.
7539  * For future use cases we should either initialize them dynamically based on
7540  * plane capabilities, or initialize this array to all formats, so internal drm
7541  * plane capabilities, or initialize this array to all formats, so the
7542  * internal drm check will succeed, and let DC implement the proper check.
7543 static const uint32_t rgb_formats[] = {
7544         DRM_FORMAT_XRGB8888,
7545         DRM_FORMAT_ARGB8888,
7546         DRM_FORMAT_RGBA8888,
7547         DRM_FORMAT_XRGB2101010,
7548         DRM_FORMAT_XBGR2101010,
7549         DRM_FORMAT_ARGB2101010,
7550         DRM_FORMAT_ABGR2101010,
7551         DRM_FORMAT_XRGB16161616,
7552         DRM_FORMAT_XBGR16161616,
7553         DRM_FORMAT_ARGB16161616,
7554         DRM_FORMAT_ABGR16161616,
7555         DRM_FORMAT_XBGR8888,
7556         DRM_FORMAT_ABGR8888,
7557         DRM_FORMAT_RGB565,
7558 };
7559
7560 static const uint32_t overlay_formats[] = {
7561         DRM_FORMAT_XRGB8888,
7562         DRM_FORMAT_ARGB8888,
7563         DRM_FORMAT_RGBA8888,
7564         DRM_FORMAT_XBGR8888,
7565         DRM_FORMAT_ABGR8888,
7566         DRM_FORMAT_RGB565
7567 };
7568
7569 static const u32 cursor_formats[] = {
7570         DRM_FORMAT_ARGB8888
7571 };
7572
7573 static int get_plane_formats(const struct drm_plane *plane,
7574                              const struct dc_plane_cap *plane_cap,
7575                              uint32_t *formats, int max_formats)
7576 {
7577         int i, num_formats = 0;
7578
7579         /*
7580          * TODO: Query support for each group of formats directly from
7581          * DC plane caps. This will require adding more formats to the
7582          * caps list.
7583          */
7584
7585         switch (plane->type) {
7586         case DRM_PLANE_TYPE_PRIMARY:
7587                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7588                         if (num_formats >= max_formats)
7589                                 break;
7590
7591                         formats[num_formats++] = rgb_formats[i];
7592                 }
7593
7594                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7595                         formats[num_formats++] = DRM_FORMAT_NV12;
7596                 if (plane_cap && plane_cap->pixel_format_support.p010)
7597                         formats[num_formats++] = DRM_FORMAT_P010;
7598                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7599                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7600                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7601                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7602                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7603                 }
7604                 break;
7605
7606         case DRM_PLANE_TYPE_OVERLAY:
7607                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7608                         if (num_formats >= max_formats)
7609                                 break;
7610
7611                         formats[num_formats++] = overlay_formats[i];
7612                 }
7613                 break;
7614
7615         case DRM_PLANE_TYPE_CURSOR:
7616                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7617                         if (num_formats >= max_formats)
7618                                 break;
7619
7620                         formats[num_formats++] = cursor_formats[i];
7621                 }
7622                 break;
7623         }
7624
7625         return num_formats;
7626 }
7627
7628 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7629                                 struct drm_plane *plane,
7630                                 unsigned long possible_crtcs,
7631                                 const struct dc_plane_cap *plane_cap)
7632 {
7633         uint32_t formats[32];
7634         int num_formats;
7635         int res = -EPERM;
7636         unsigned int supported_rotations;
7637         uint64_t *modifiers = NULL;
7638
7639         num_formats = get_plane_formats(plane, plane_cap, formats,
7640                                         ARRAY_SIZE(formats));
7641
7642         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7643         if (res)
7644                 return res;
7645
7646         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7647                                        &dm_plane_funcs, formats, num_formats,
7648                                        modifiers, plane->type, NULL);
7649         kfree(modifiers);
7650         if (res)
7651                 return res;
7652
7653         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7654             plane_cap && plane_cap->per_pixel_alpha) {
7655                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7656                                           BIT(DRM_MODE_BLEND_PREMULTI);
7657
7658                 drm_plane_create_alpha_property(plane);
7659                 drm_plane_create_blend_mode_property(plane, blend_caps);
7660         }
7661
7662         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7663             plane_cap &&
7664             (plane_cap->pixel_format_support.nv12 ||
7665              plane_cap->pixel_format_support.p010)) {
7666                 /* This only affects YUV formats. */
7667                 drm_plane_create_color_properties(
7668                         plane,
7669                         BIT(DRM_COLOR_YCBCR_BT601) |
7670                         BIT(DRM_COLOR_YCBCR_BT709) |
7671                         BIT(DRM_COLOR_YCBCR_BT2020),
7672                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7673                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7674                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7675         }
7676
7677         supported_rotations =
7678                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7679                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7680
7681         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7682             plane->type != DRM_PLANE_TYPE_CURSOR)
7683                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7684                                                    supported_rotations);
7685
7686         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7687
7688         /* Create (reset) the plane state */
7689         if (plane->funcs->reset)
7690                 plane->funcs->reset(plane);
7691
7692         return 0;
7693 }
7694
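/*
 * Create the CRTC together with a dedicated cursor plane. Cursor planes
 * are "fake" as far as DC is concerned (see count_crtc_active_planes()),
 * so each CRTC allocates its own here instead of sharing one.
 */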
7695 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7696                                struct drm_plane *plane,
7697                                uint32_t crtc_index)
7698 {
7699         struct amdgpu_crtc *acrtc = NULL;
7700         struct drm_plane *cursor_plane;
7701
7702         int res = -ENOMEM;
7703
7704         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7705         if (!cursor_plane)
7706                 goto fail;
7707
7708         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7709         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;
7710
7711         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7712         if (!acrtc)
7713                 goto fail;
7714
7715         res = drm_crtc_init_with_planes(
7716                         dm->ddev,
7717                         &acrtc->base,
7718                         plane,
7719                         cursor_plane,
7720                         &amdgpu_dm_crtc_funcs, NULL);
7721
7722         if (res)
7723                 goto fail;
7724
7725         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7726
7727         /* Create (reset) the plane state */
7728         if (acrtc->base.funcs->reset)
7729                 acrtc->base.funcs->reset(&acrtc->base);
7730
7731         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7732         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7733
7734         acrtc->crtc_id = crtc_index;
7735         acrtc->base.enabled = false;
7736         acrtc->otg_inst = -1;
7737
7738         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7739         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7740                                    true, MAX_COLOR_LUT_ENTRIES);
7741         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7742
7743         return 0;
7744
7745 fail:
7746         kfree(acrtc);
7747         kfree(cursor_plane);
7748         return res;
7749 }
7750
7751
7752 static int to_drm_connector_type(enum signal_type st)
7753 {
7754         switch (st) {
7755         case SIGNAL_TYPE_HDMI_TYPE_A:
7756                 return DRM_MODE_CONNECTOR_HDMIA;
7757         case SIGNAL_TYPE_EDP:
7758                 return DRM_MODE_CONNECTOR_eDP;
7759         case SIGNAL_TYPE_LVDS:
7760                 return DRM_MODE_CONNECTOR_LVDS;
7761         case SIGNAL_TYPE_RGB:
7762                 return DRM_MODE_CONNECTOR_VGA;
7763         case SIGNAL_TYPE_DISPLAY_PORT:
7764         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7765                 return DRM_MODE_CONNECTOR_DisplayPort;
7766         case SIGNAL_TYPE_DVI_DUAL_LINK:
7767         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7768                 return DRM_MODE_CONNECTOR_DVID;
7769         case SIGNAL_TYPE_VIRTUAL:
7770                 return DRM_MODE_CONNECTOR_VIRTUAL;
7771
7772         default:
7773                 return DRM_MODE_CONNECTOR_Unknown;
7774         }
7775 }
7776
7777 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7778 {
7779         struct drm_encoder *encoder;
7780
7781         /* There is only one encoder per connector */
7782         drm_connector_for_each_possible_encoder(connector, encoder)
7783                 return encoder;
7784
7785         return NULL;
7786 }
7787
7788 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7789 {
7790         struct drm_encoder *encoder;
7791         struct amdgpu_encoder *amdgpu_encoder;
7792
7793         encoder = amdgpu_dm_connector_to_encoder(connector);
7794
7795         if (encoder == NULL)
7796                 return;
7797
7798         amdgpu_encoder = to_amdgpu_encoder(encoder);
7799
7800         amdgpu_encoder->native_mode.clock = 0;
7801
7802         if (!list_empty(&connector->probed_modes)) {
7803                 struct drm_display_mode *preferred_mode = NULL;
7804
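                /*
                 * probed_modes was sorted by drm_mode_sort(), which places
                 * preferred modes first, so only the head of the list needs
                 * to be inspected.
                 */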
7805                 list_for_each_entry(preferred_mode,
7806                                     &connector->probed_modes,
7807                                     head) {
7808                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7809                                 amdgpu_encoder->native_mode = *preferred_mode;
7810
7811                         break;
7812                 }
7813
7814         }
7815 }
7816
7817 static struct drm_display_mode *
7818 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7819                              char *name,
7820                              int hdisplay, int vdisplay)
7821 {
7822         struct drm_device *dev = encoder->dev;
7823         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7824         struct drm_display_mode *mode = NULL;
7825         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7826
7827         mode = drm_mode_duplicate(dev, native_mode);
7828
7829         if (mode == NULL)
7830                 return NULL;
7831
7832         mode->hdisplay = hdisplay;
7833         mode->vdisplay = vdisplay;
7834         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7835         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7836
7837         return mode;
7839 }
7840
7841 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7842                                                  struct drm_connector *connector)
7843 {
7844         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7845         struct drm_display_mode *mode = NULL;
7846         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7847         struct amdgpu_dm_connector *amdgpu_dm_connector =
7848                                 to_amdgpu_dm_connector(connector);
7849         int i;
7850         int n;
7851         struct mode_size {
7852                 char name[DRM_DISPLAY_MODE_LEN];
7853                 int w;
7854                 int h;
7855         } common_modes[] = {
7856                 {  "640x480",  640,  480},
7857                 {  "800x600",  800,  600},
7858                 { "1024x768", 1024,  768},
7859                 { "1280x720", 1280,  720},
7860                 { "1280x800", 1280,  800},
7861                 {"1280x1024", 1280, 1024},
7862                 { "1440x900", 1440,  900},
7863                 {"1680x1050", 1680, 1050},
7864                 {"1600x1200", 1600, 1200},
7865                 {"1920x1080", 1920, 1080},
7866                 {"1920x1200", 1920, 1200}
7867         };
7868
7869         n = ARRAY_SIZE(common_modes);
7870
7871         for (i = 0; i < n; i++) {
7872                 struct drm_display_mode *curmode = NULL;
7873                 bool mode_existed = false;
7874
7875                 if (common_modes[i].w > native_mode->hdisplay ||
7876                     common_modes[i].h > native_mode->vdisplay ||
7877                    (common_modes[i].w == native_mode->hdisplay &&
7878                     common_modes[i].h == native_mode->vdisplay))
7879                         continue;
7880
7881                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7882                         if (common_modes[i].w == curmode->hdisplay &&
7883                             common_modes[i].h == curmode->vdisplay) {
7884                                 mode_existed = true;
7885                                 break;
7886                         }
7887                 }
7888
7889                 if (mode_existed)
7890                         continue;
7891
7892                 mode = amdgpu_dm_create_common_mode(encoder,
7893                                 common_modes[i].name, common_modes[i].w,
7894                                 common_modes[i].h);
                if (!mode)
                        continue;
7895                 drm_mode_probed_add(connector, mode);
7896                 amdgpu_dm_connector->num_modes++;
7897         }
7898 }
7899
7900 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7901 {
7902         struct drm_encoder *encoder;
7903         struct amdgpu_encoder *amdgpu_encoder;
7904         const struct drm_display_mode *native_mode;
7905
7906         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7907             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7908                 return;
7909
7910         encoder = amdgpu_dm_connector_to_encoder(connector);
7911         if (!encoder)
7912                 return;
7913
7914         amdgpu_encoder = to_amdgpu_encoder(encoder);
7915
7916         native_mode = &amdgpu_encoder->native_mode;
7917         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7918                 return;
7919
7920         drm_connector_set_panel_orientation_with_quirk(connector,
7921                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7922                                                        native_mode->hdisplay,
7923                                                        native_mode->vdisplay);
7924 }
7925
7926 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7927                                               struct edid *edid)
7928 {
7929         struct amdgpu_dm_connector *amdgpu_dm_connector =
7930                         to_amdgpu_dm_connector(connector);
7931
7932         if (edid) {
7933                 /* empty probed_modes */
7934                 INIT_LIST_HEAD(&connector->probed_modes);
7935                 amdgpu_dm_connector->num_modes =
7936                                 drm_add_edid_modes(connector, edid);
7937
7938                 /* Sort the probed modes before calling
7939                  * amdgpu_dm_get_native_mode() since the EDID can have
7940                  * more than one preferred mode. Modes that appear
7941                  * later in the probed mode list could be of higher,
7942                  * preferred resolution. For example, a 3840x2160
7943                  * resolution in the base EDID preferred timing and a
7944                  * 4096x2160 preferred resolution in a DID extension block.
7945                  */
7946                 drm_mode_sort(&connector->probed_modes);
7947                 amdgpu_dm_get_native_mode(connector);
7948
7949                 /* Freesync capabilities are reset by calling
7950                  * drm_add_edid_modes() and need to be
7951                  * restored here.
7952                  */
7953                 amdgpu_dm_update_freesync_caps(connector, edid);
7954
7955                 amdgpu_set_panel_orientation(connector);
7956         } else {
7957                 amdgpu_dm_connector->num_modes = 0;
7958         }
7959 }
7960
7961 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7962                               struct drm_display_mode *mode)
7963 {
7964         struct drm_display_mode *m;
7965
7966         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7967                 if (drm_mode_equal(m, mode))
7968                         return true;
7969         }
7970
7971         return false;
7972 }
7973
7974 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7975 {
7976         const struct drm_display_mode *m;
7977         struct drm_display_mode *new_mode;
7978         uint i;
7979         uint32_t new_modes_count = 0;
7980
7981         /* Standard FPS values
7982          *
7983          * 23.976       - TV/NTSC
7984          * 24           - Cinema
7985          * 25           - TV/PAL
7986          * 29.97        - TV/NTSC
7987          * 30           - TV/NTSC
7988          * 48           - Cinema HFR
7989          * 50           - TV/PAL
7990          * 60           - Commonly used
7991          * 48,72,96,120 - Multiples of 24
7992          */
7993         static const uint32_t common_rates[] = {
7994                 23976, 24000, 25000, 29970, 30000,
7995                 48000, 50000, 60000, 72000, 96000, 120000
7996         };
7997
7998         /*
7999          * Find mode with highest refresh rate with the same resolution
8000          * as the preferred mode. Some monitors report a preferred mode
8001          * with lower resolution than the highest refresh rate supported.
8002          */
8003
8004         m = get_highest_refresh_rate_mode(aconnector, true);
8005         if (!m)
8006                 return 0;
8007
8008         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8009                 uint64_t target_vtotal, target_vtotal_diff;
8010                 uint64_t num, den;
8011
8012                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8013                         continue;
8014
8015                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8016                     common_rates[i] > aconnector->max_vfreq * 1000)
8017                         continue;
8018
8019                 num = (unsigned long long)m->clock * 1000 * 1000;
8020                 den = common_rates[i] * (unsigned long long)m->htotal;
8021                 target_vtotal = div_u64(num, den);
8022                 target_vtotal_diff = target_vtotal - m->vtotal;
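                /*
                 * Illustrative example: retargeting a 1920x1080@60 mode
                 * (clock = 148500 kHz, htotal = 2200, vtotal = 1125) to
                 * 48 Hz gives target_vtotal = 148500e6 / (48000 * 2200)
                 * = 1406, i.e. 281 extra lines of vertical blanking at
                 * an unchanged pixel clock.
                 */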
8023
8024                 /* Check for illegal modes */
8025                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8026                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8027                     m->vtotal + target_vtotal_diff < m->vsync_end)
8028                         continue;
8029
8030                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8031                 if (!new_mode)
8032                         goto out;
8033
8034                 new_mode->vtotal += (u16)target_vtotal_diff;
8035                 new_mode->vsync_start += (u16)target_vtotal_diff;
8036                 new_mode->vsync_end += (u16)target_vtotal_diff;
8037                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8038                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8039
8040                 if (!is_duplicate_mode(aconnector, new_mode)) {
8041                         drm_mode_probed_add(&aconnector->base, new_mode);
8042                         new_modes_count += 1;
8043                 } else
8044                         drm_mode_destroy(aconnector->base.dev, new_mode);
8045         }
8046  out:
8047         return new_modes_count;
8048 }
8049
8050 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8051                                                    struct edid *edid)
8052 {
8053         struct amdgpu_dm_connector *amdgpu_dm_connector =
8054                 to_amdgpu_dm_connector(connector);
8055
8056         if (!(amdgpu_freesync_vid_mode && edid))
8057                 return;
8058
8059         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8060                 amdgpu_dm_connector->num_modes +=
8061                         add_fs_modes(amdgpu_dm_connector);
8062 }
8063
8064 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8065 {
8066         struct amdgpu_dm_connector *amdgpu_dm_connector =
8067                         to_amdgpu_dm_connector(connector);
8068         struct drm_encoder *encoder;
8069         struct edid *edid = amdgpu_dm_connector->edid;
8070
8071         encoder = amdgpu_dm_connector_to_encoder(connector);
8072
8073         if (!drm_edid_is_valid(edid)) {
8074                 amdgpu_dm_connector->num_modes =
8075                                 drm_add_modes_noedid(connector, 640, 480);
8076         } else {
8077                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8078                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8079                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8080         }
8081         amdgpu_dm_fbc_init(connector);
8082
8083         return amdgpu_dm_connector->num_modes;
8084 }
8085
8086 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8087                                      struct amdgpu_dm_connector *aconnector,
8088                                      int connector_type,
8089                                      struct dc_link *link,
8090                                      int link_index)
8091 {
8092         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8093
8094         /*
8095          * Some of the properties below require access to state, like bpc.
8096          * Allocate some default initial connector state with our reset helper.
8097          */
8098         if (aconnector->base.funcs->reset)
8099                 aconnector->base.funcs->reset(&aconnector->base);
8100
8101         aconnector->connector_id = link_index;
8102         aconnector->dc_link = link;
8103         aconnector->base.interlace_allowed = false;
8104         aconnector->base.doublescan_allowed = false;
8105         aconnector->base.stereo_allowed = false;
8106         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8107         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8108         aconnector->audio_inst = -1;
8109         mutex_init(&aconnector->hpd_lock);
8110
8111         /*
8112          * Configure HPD hot plug support. The connector->polled default
8113          * value of 0 means HPD hot plug is not supported.
8114          */
8115         switch (connector_type) {
8116         case DRM_MODE_CONNECTOR_HDMIA:
8117                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8118                 aconnector->base.ycbcr_420_allowed =
8119                         link->link_enc->features.hdmi_ycbcr420_supported;
8120                 break;
8121         case DRM_MODE_CONNECTOR_DisplayPort:
8122                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8123                 if (link->is_dig_mapping_flexible &&
8124                     link->dc->res_pool->funcs->link_encs_assign) {
8125                         link->link_enc =
8126                                 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8127                         if (!link->link_enc)
8128                                 link->link_enc =
8129                                         link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8130                 }
8131
8132                 if (link->link_enc)
8133                         aconnector->base.ycbcr_420_allowed =
8134                                 link->link_enc->features.dp_ycbcr420_supported;
8135                 break;
8136         case DRM_MODE_CONNECTOR_DVID:
8137                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8138                 break;
8139         default:
8140                 break;
8141         }
8142
8143         drm_object_attach_property(&aconnector->base.base,
8144                                 dm->ddev->mode_config.scaling_mode_property,
8145                                 DRM_MODE_SCALE_NONE);
8146
8147         drm_object_attach_property(&aconnector->base.base,
8148                                 adev->mode_info.underscan_property,
8149                                 UNDERSCAN_OFF);
8150         drm_object_attach_property(&aconnector->base.base,
8151                                 adev->mode_info.underscan_hborder_property,
8152                                 0);
8153         drm_object_attach_property(&aconnector->base.base,
8154                                 adev->mode_info.underscan_vborder_property,
8155                                 0);
8156
8157         if (!aconnector->mst_port)
8158                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8159
8160         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8161         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8162         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8163
8164         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8165             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8166                 drm_object_attach_property(&aconnector->base.base,
8167                                 adev->mode_info.abm_level_property, 0);
8168         }
8169
8170         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8171             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8172             connector_type == DRM_MODE_CONNECTOR_eDP) {
8173                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8174
8175                 if (!aconnector->mst_port)
8176                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8177
8178 #ifdef CONFIG_DRM_AMD_DC_HDCP
8179                 if (adev->dm.hdcp_workqueue)
8180                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8181 #endif
8182         }
8183 }
8184
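/*
 * amdgpu_dm_i2c_xfer() - run i2c transactions over a DC DDC line.
 *
 * Each struct i2c_msg maps 1:1 to a struct i2c_payload, so a typical
 * DDC/EDID block read reaches this function as two messages (an
 * illustrative sketch, not taken from this file):
 *
 *   u8 offset = 0;
 *   u8 edid[128];
 *   struct i2c_msg msgs[] = {
 *           { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *           { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *   };
 *
 * and is submitted to DC as a single two-payload command at the fixed
 * speed of 100 set below.
 */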
8185 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8186                               struct i2c_msg *msgs, int num)
8187 {
8188         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8189         struct ddc_service *ddc_service = i2c->ddc_service;
8190         struct i2c_command cmd;
8191         int i;
8192         int result = -EIO;
8193
8194         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8195
8196         if (!cmd.payloads)
8197                 return result;
8198
8199         cmd.number_of_payloads = num;
8200         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8201         cmd.speed = 100;
8202
8203         for (i = 0; i < num; i++) {
8204                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8205                 cmd.payloads[i].address = msgs[i].addr;
8206                 cmd.payloads[i].length = msgs[i].len;
8207                 cmd.payloads[i].data = msgs[i].buf;
8208         }
8209
8210         if (dc_submit_i2c(
8211                         ddc_service->ctx->dc,
8212                         ddc_service->ddc_pin->hw_info.ddc_channel,
8213                         &cmd))
8214                 result = num;
8215
8216         kfree(cmd.payloads);
8217         return result;
8218 }
8219
8220 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8221 {
8222         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8223 }
8224
8225 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8226         .master_xfer = amdgpu_dm_i2c_xfer,
8227         .functionality = amdgpu_dm_i2c_func,
8228 };
8229
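/*
 * create_i2c() - allocate and populate an i2c adapter backed by a DC DDC
 * service. Note this only fills in the adapter; registration happens in
 * amdgpu_dm_connector_init() via i2c_add_adapter(), which also frees the
 * allocation if registration fails.
 */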
8230 static struct amdgpu_i2c_adapter *
8231 create_i2c(struct ddc_service *ddc_service,
8232            int link_index,
8233            int *res)
8234 {
8235         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8236         struct amdgpu_i2c_adapter *i2c;
8237
8238         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8239         if (!i2c)
8240                 return NULL;
8241         i2c->base.owner = THIS_MODULE;
8242         i2c->base.class = I2C_CLASS_DDC;
8243         i2c->base.dev.parent = &adev->pdev->dev;
8244         i2c->base.algo = &amdgpu_dm_i2c_algo;
8245         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8246         i2c_set_adapdata(&i2c->base, i2c);
8247         i2c->ddc_service = ddc_service;
8248         if (i2c->ddc_service->ddc_pin)
8249                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8250
8251         return i2c;
8252 }
8253
8254
8255 /*
8256  * Note: this function assumes that dc_link_detect() was called for the
8257  * dc_link which will be represented by this aconnector.
8258  */
8259 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8260                                     struct amdgpu_dm_connector *aconnector,
8261                                     uint32_t link_index,
8262                                     struct amdgpu_encoder *aencoder)
8263 {
8264         int res = 0;
8265         int connector_type;
8266         struct dc *dc = dm->dc;
8267         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8268         struct amdgpu_i2c_adapter *i2c;
8269
8270         link->priv = aconnector;
8271
8272         DRM_DEBUG_DRIVER("%s()\n", __func__);
8273
8274         i2c = create_i2c(link->ddc, link->link_index, &res);
8275         if (!i2c) {
8276                 DRM_ERROR("Failed to create i2c adapter data\n");
8277                 return -ENOMEM;
8278         }
8279
8280         aconnector->i2c = i2c;
8281         res = i2c_add_adapter(&i2c->base);
8282
8283         if (res) {
8284                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8285                 goto out_free;
8286         }
8287
8288         connector_type = to_drm_connector_type(link->connector_signal);
8289
8290         res = drm_connector_init_with_ddc(
8291                         dm->ddev,
8292                         &aconnector->base,
8293                         &amdgpu_dm_connector_funcs,
8294                         connector_type,
8295                         &i2c->base);
8296
8297         if (res) {
8298                 DRM_ERROR("connector_init failed\n");
8299                 aconnector->connector_id = -1;
8300                 goto out_free;
8301         }
8302
8303         drm_connector_helper_add(
8304                         &aconnector->base,
8305                         &amdgpu_dm_connector_helper_funcs);
8306
8307         amdgpu_dm_connector_init_helper(
8308                 dm,
8309                 aconnector,
8310                 connector_type,
8311                 link,
8312                 link_index);
8313
8314         drm_connector_attach_encoder(
8315                 &aconnector->base, &aencoder->base);
8316
8317         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8318                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8319                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8320
8321 out_free:
8322         if (res) {
8323                 kfree(i2c);
8324                 aconnector->i2c = NULL;
8325         }
8326         return res;
8327 }
8328
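/*
 * Map the number of CRTCs to an encoder possible_crtcs bitmask. For one
 * to six CRTCs the switch below is equivalent to (1 << num_crtc) - 1,
 * i.e. GENMASK(num_crtc - 1, 0), capped at six bits.
 */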
8329 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8330 {
8331         switch (adev->mode_info.num_crtc) {
8332         case 1:
8333                 return 0x1;
8334         case 2:
8335                 return 0x3;
8336         case 3:
8337                 return 0x7;
8338         case 4:
8339                 return 0xf;
8340         case 5:
8341                 return 0x1f;
8342         case 6:
8343         default:
8344                 return 0x3f;
8345         }
8346 }
8347
8348 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8349                                   struct amdgpu_encoder *aencoder,
8350                                   uint32_t link_index)
8351 {
8352         struct amdgpu_device *adev = drm_to_adev(dev);
8353
8354         int res = drm_encoder_init(dev,
8355                                    &aencoder->base,
8356                                    &amdgpu_dm_encoder_funcs,
8357                                    DRM_MODE_ENCODER_TMDS,
8358                                    NULL);
8359
8360         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8361
8362         if (!res)
8363                 aencoder->encoder_id = link_index;
8364         else
8365                 aencoder->encoder_id = -1;
8366
8367         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8368
8369         return res;
8370 }
8371
8372 static void manage_dm_interrupts(struct amdgpu_device *adev,
8373                                  struct amdgpu_crtc *acrtc,
8374                                  bool enable)
8375 {
8376         /*
8377          * We have no guarantee that the frontend index maps to the same
8378          * backend index - some even map to more than one.
8379          *
8380          * TODO: Use a different interrupt or check DC itself for the mapping.
8381          */
8382         int irq_type =
8383                 amdgpu_display_crtc_idx_to_irq_type(
8384                         adev,
8385                         acrtc->crtc_id);
8386
8387         if (enable) {
8388                 drm_crtc_vblank_on(&acrtc->base);
8389                 amdgpu_irq_get(
8390                         adev,
8391                         &adev->pageflip_irq,
8392                         irq_type);
8393 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8394                 amdgpu_irq_get(
8395                         adev,
8396                         &adev->vline0_irq,
8397                         irq_type);
8398 #endif
8399         } else {
8400 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8401                 amdgpu_irq_put(
8402                         adev,
8403                         &adev->vline0_irq,
8404                         irq_type);
8405 #endif
8406                 amdgpu_irq_put(
8407                         adev,
8408                         &adev->pageflip_irq,
8409                         irq_type);
8410                 drm_crtc_vblank_off(&acrtc->base);
8411         }
8412 }
8413
8414 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8415                                       struct amdgpu_crtc *acrtc)
8416 {
8417         int irq_type =
8418                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8419
8420         /*
8421          * This reads the current state for the IRQ and forcibly reapplies
8422          * the setting to hardware.
8423          */
8424         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8425 }
8426
8427 static bool
8428 is_scaling_state_different(const struct dm_connector_state *dm_state,
8429                            const struct dm_connector_state *old_dm_state)
8430 {
8431         if (dm_state->scaling != old_dm_state->scaling)
8432                 return true;
8433         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8434                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8435                         return true;
8436         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8437                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8438                         return true;
8439         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8440                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8441                 return true;
8442         return false;
8443 }
8444
8445 #ifdef CONFIG_DRM_AMD_DC_HDCP
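/*
 * Decide whether a content protection (HDCP) state transition requires
 * (re)enabling HDCP. Summarizing the checks below:
 *
 *   UNDESIRED -> DESIRED    enable                  (returns true)
 *   DESIRED   -> UNDESIRED  disable                 (true)
 *   ENABLED   -> UNDESIRED  disable                 (true)
 *   ENABLED   -> DESIRED    re-enable race          (false, state restored)
 *   UNDESIRED -> ENABLED    S3 resume               (demoted to DESIRED first)
 *   DESIRED   -> DESIRED    hotplug/stream re-add   (true only in the
 *                                                    special cases below)
 */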
8446 static bool is_content_protection_different(struct drm_connector_state *state,
8447                                             const struct drm_connector_state *old_state,
8448                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8449 {
8450         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8451         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8452
8453         /* Handle: Type0/1 change */
8454         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8455             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8456                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8457                 return true;
8458         }
8459
8460         /* CP is being re-enabled, ignore this
8461          *
8462          * Handles:     ENABLED -> DESIRED
8463          */
8464         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8465             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8466                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8467                 return false;
8468         }
8469
8470         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8471          *
8472          * Handles:     UNDESIRED -> ENABLED
8473          */
8474         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8475             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8476                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8477
8478         /* Stream removed and re-enabled
8479          *
8480          * Can sometimes overlap with the HPD case,
8481          * thus set update_hdcp to false to avoid
8482          * setting HDCP multiple times.
8483          *
8484          * Handles:     DESIRED -> DESIRED (Special case)
8485          */
8486         if (!(old_state->crtc && old_state->crtc->enabled) &&
8487                 state->crtc && state->crtc->enabled &&
8488                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8489                 dm_con_state->update_hdcp = false;
8490                 return true;
8491         }
8492
8493         /* Hot-plug, headless s3, dpms
8494          *
8495          * Only start HDCP if the display is connected/enabled.
8496          * update_hdcp flag will be set to false until the next
8497          * HPD comes in.
8498          *
8499          * Handles:     DESIRED -> DESIRED (Special case)
8500          */
8501         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8502             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8503                 dm_con_state->update_hdcp = false;
8504                 return true;
8505         }
8506
8507         /*
8508          * Handles:     UNDESIRED -> UNDESIRED
8509          *              DESIRED -> DESIRED
8510          *              ENABLED -> ENABLED
8511          */
8512         if (old_state->content_protection == state->content_protection)
8513                 return false;
8514
8515         /*
8516          * Handles:     UNDESIRED -> DESIRED
8517          *              DESIRED -> UNDESIRED
8518          *              ENABLED -> UNDESIRED
8519          */
8520         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8521                 return true;
8522
8523         /*
8524          * Handles:     DESIRED -> ENABLED
8525          */
8526         return false;
8527 }
8528
8529 #endif
8530 static void remove_stream(struct amdgpu_device *adev,
8531                           struct amdgpu_crtc *acrtc,
8532                           struct dc_stream_state *stream)
8533 {
8534         /* this is the update mode case */
8535
8536         acrtc->otg_inst = -1;
8537         acrtc->enabled = false;
8538 }
8539
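/*
 * Translate the DRM cursor plane position into a DC cursor position.
 * Negative coordinates are handled by clamping x/y to 0 and shifting the
 * hotspot instead: e.g. crtc_x = -10 becomes x = 0, x_hotspot = 10, so
 * the cursor image still appears to slide off the left edge.
 */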
8540 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8541                                struct dc_cursor_position *position)
8542 {
8543         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8544         int x, y;
8545         int xorigin = 0, yorigin = 0;
8546
8547         if (!crtc || !plane->state->fb)
8548                 return 0;
8549
8550         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8551             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8552                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8553                           __func__,
8554                           plane->state->crtc_w,
8555                           plane->state->crtc_h);
8556                 return -EINVAL;
8557         }
8558
8559         x = plane->state->crtc_x;
8560         y = plane->state->crtc_y;
8561
8562         if (x <= -amdgpu_crtc->max_cursor_width ||
8563             y <= -amdgpu_crtc->max_cursor_height)
8564                 return 0;
8565
8566         if (x < 0) {
8567                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8568                 x = 0;
8569         }
8570         if (y < 0) {
8571                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8572                 y = 0;
8573         }
8574         position->enable = true;
8575         position->translate_by_source = true;
8576         position->x = x;
8577         position->y = y;
8578         position->x_hotspot = xorigin;
8579         position->y_hotspot = yorigin;
8580
8581         return 0;
8582 }
8583
8584 static void handle_cursor_update(struct drm_plane *plane,
8585                                  struct drm_plane_state *old_plane_state)
8586 {
8587         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8588         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8589         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8590         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8591         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8592         uint64_t address = afb ? afb->address : 0;
8593         struct dc_cursor_position position = {0};
8594         struct dc_cursor_attributes attributes;
8595         int ret;
8596
8597         if (!plane->state->fb && !old_plane_state->fb)
8598                 return;
8599
8600         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8601                       __func__,
8602                       amdgpu_crtc->crtc_id,
8603                       plane->state->crtc_w,
8604                       plane->state->crtc_h);
8605
8606         ret = get_cursor_position(plane, crtc, &position);
8607         if (ret)
8608                 return;
8609
8610         if (!position.enable) {
8611                 /* turn off cursor */
8612                 if (crtc_state && crtc_state->stream) {
8613                         mutex_lock(&adev->dm.dc_lock);
8614                         dc_stream_set_cursor_position(crtc_state->stream,
8615                                                       &position);
8616                         mutex_unlock(&adev->dm.dc_lock);
8617                 }
8618                 return;
8619         }
8620
8621         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8622         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8623
8624         memset(&attributes, 0, sizeof(attributes));
8625         attributes.address.high_part = upper_32_bits(address);
8626         attributes.address.low_part  = lower_32_bits(address);
8627         attributes.width             = plane->state->crtc_w;
8628         attributes.height            = plane->state->crtc_h;
8629         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8630         attributes.rotation_angle    = 0;
8631         attributes.attribute_flags.value = 0;
8632
8633         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8634
8635         if (crtc_state->stream) {
8636                 mutex_lock(&adev->dm.dc_lock);
8637                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8638                                                          &attributes))
8639                         DRM_ERROR("DC failed to set cursor attributes\n");
8640
8641                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8642                                                    &position))
8643                         DRM_ERROR("DC failed to set cursor position\n");
8644                 mutex_unlock(&adev->dm.dc_lock);
8645         }
8646 }
8647
8648 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8649 {
8650
8651         assert_spin_locked(&acrtc->base.dev->event_lock);
8652         WARN_ON(acrtc->event);
8653
8654         acrtc->event = acrtc->base.state->event;
8655
8656         /* Set the flip status */
8657         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8658
8659         /* Mark this event as consumed */
8660         acrtc->base.state->event = NULL;
8661
8662         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8663                      acrtc->crtc_id);
8664 }
8665
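/*
 * Per-flip VRR bookkeeping: feed the flip into the freesync module,
 * rebuild the VRR infopacket, and record whether the timing adjustment or
 * the infopacket changed so the commit path knows to re-send them. On
 * pre-SOC15 parts (family < AI) the vmin/vmax adjustment must also be
 * applied here, before the frame ends.
 */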
8666 static void update_freesync_state_on_stream(
8667         struct amdgpu_display_manager *dm,
8668         struct dm_crtc_state *new_crtc_state,
8669         struct dc_stream_state *new_stream,
8670         struct dc_plane_state *surface,
8671         u32 flip_timestamp_in_us)
8672 {
8673         struct mod_vrr_params vrr_params;
8674         struct dc_info_packet vrr_infopacket = {0};
8675         struct amdgpu_device *adev = dm->adev;
8676         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8677         unsigned long flags;
8678         bool pack_sdp_v1_3 = false;
8679
8680         if (!new_stream)
8681                 return;
8682
8683         /*
8684          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8685          * For now it's sufficient to just guard against these conditions.
8686          */
8687
8688         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8689                 return;
8690
8691         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8692         vrr_params = acrtc->dm_irq_params.vrr_params;
8693
8694         if (surface) {
8695                 mod_freesync_handle_preflip(
8696                         dm->freesync_module,
8697                         surface,
8698                         new_stream,
8699                         flip_timestamp_in_us,
8700                         &vrr_params);
8701
8702                 if (adev->family < AMDGPU_FAMILY_AI &&
8703                     amdgpu_dm_vrr_active(new_crtc_state)) {
8704                         mod_freesync_handle_v_update(dm->freesync_module,
8705                                                      new_stream, &vrr_params);
8706
8707                         /* Need to call this before the frame ends. */
8708                         dc_stream_adjust_vmin_vmax(dm->dc,
8709                                                    new_crtc_state->stream,
8710                                                    &vrr_params.adjust);
8711                 }
8712         }
8713
8714         mod_freesync_build_vrr_infopacket(
8715                 dm->freesync_module,
8716                 new_stream,
8717                 &vrr_params,
8718                 PACKET_TYPE_VRR,
8719                 TRANSFER_FUNC_UNKNOWN,
8720                 &vrr_infopacket,
8721                 pack_sdp_v1_3);
8722
8723         new_crtc_state->freesync_timing_changed |=
8724                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8725                         &vrr_params.adjust,
8726                         sizeof(vrr_params.adjust)) != 0);
8727
8728         new_crtc_state->freesync_vrr_info_changed |=
8729                 (memcmp(&new_crtc_state->vrr_infopacket,
8730                         &vrr_infopacket,
8731                         sizeof(vrr_infopacket)) != 0);
8732
8733         acrtc->dm_irq_params.vrr_params = vrr_params;
8734         new_crtc_state->vrr_infopacket = vrr_infopacket;
8735
8736         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8737         new_stream->vrr_infopacket = vrr_infopacket;
8738
8739         if (new_crtc_state->freesync_vrr_info_changed)
8740                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8741                               new_crtc_state->base.crtc->base.id,
8742                               (int)new_crtc_state->base.vrr_enabled,
8743                               (int)vrr_params.state);
8744
8745         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8746 }
8747
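/*
 * Recompute the freesync/VRR parameters for a stream and mirror them into
 * acrtc->dm_irq_params under the event lock, so the vblank/vupdate IRQ
 * handlers see a consistent copy without taking modeset locks.
 */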
8748 static void update_stream_irq_parameters(
8749         struct amdgpu_display_manager *dm,
8750         struct dm_crtc_state *new_crtc_state)
8751 {
8752         struct dc_stream_state *new_stream = new_crtc_state->stream;
8753         struct mod_vrr_params vrr_params;
8754         struct mod_freesync_config config = new_crtc_state->freesync_config;
8755         struct amdgpu_device *adev = dm->adev;
8756         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8757         unsigned long flags;
8758
8759         if (!new_stream)
8760                 return;
8761
8762         /*
8763          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8764          * For now it's sufficient to just guard against these conditions.
8765          */
8766         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8767                 return;
8768
8769         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8770         vrr_params = acrtc->dm_irq_params.vrr_params;
8771
8772         if (new_crtc_state->vrr_supported &&
8773             config.min_refresh_in_uhz &&
8774             config.max_refresh_in_uhz) {
8775                 /*
8776                  * if freesync compatible mode was set, config.state will be set
8777                  * in atomic check
8778                  */
8779                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8780                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8781                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8782                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8783                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8784                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8785                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8786                 } else {
8787                         config.state = new_crtc_state->base.vrr_enabled ?
8788                                                      VRR_STATE_ACTIVE_VARIABLE :
8789                                                      VRR_STATE_INACTIVE;
8790                 }
8791         } else {
8792                 config.state = VRR_STATE_UNSUPPORTED;
8793         }
8794
8795         mod_freesync_build_vrr_params(dm->freesync_module,
8796                                       new_stream,
8797                                       &config, &vrr_params);
8798
8799         new_crtc_state->freesync_timing_changed |=
8800                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8801                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8802
8803         new_crtc_state->freesync_config = config;
8804         /* Copy state for access from DM IRQ handler */
8805         acrtc->dm_irq_params.freesync_config = config;
8806         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8807         acrtc->dm_irq_params.vrr_params = vrr_params;
8808         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8809 }
8810
8811 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8812                                             struct dm_crtc_state *new_state)
8813 {
8814         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8815         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8816
8817         if (!old_vrr_active && new_vrr_active) {
8818                 /* Transition VRR inactive -> active:
8819                  * While VRR is active, we must not disable the vblank irq, as a
8820                  * re-enable after a disable can compute bogus vblank/pflip
8821                  * timestamps if the disable happened inside the display front-porch.
8822                  *
8823                  * We also need vupdate irq for the actual core vblank handling
8824                  * at end of vblank.
8825                  */
8826                 dm_set_vupdate_irq(new_state->base.crtc, true);
8827                 drm_crtc_vblank_get(new_state->base.crtc);
8828                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8829                                  __func__, new_state->base.crtc->base.id);
8830         } else if (old_vrr_active && !new_vrr_active) {
8831                 /* Transition VRR active -> inactive:
8832                  * Allow vblank irq disable again for fixed refresh rate.
8833                  */
8834                 dm_set_vupdate_irq(new_state->base.crtc, false);
8835                 drm_crtc_vblank_put(new_state->base.crtc);
8836                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8837                                  __func__, new_state->base.crtc->base.id);
8838         }
8839 }
8840
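/*
 * Cursor planes are not carried in the dc_surface_update bundle; each one
 * is programmed directly through dc_stream_set_cursor_*() instead.
 */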
8841 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8842 {
8843         struct drm_plane *plane;
8844         struct drm_plane_state *old_plane_state;
8845         int i;
8846
8847         /*
8848          * TODO: Make this per-stream so we don't issue redundant updates for
8849          * commits with multiple streams.
8850          */
8851         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8852                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8853                         handle_cursor_update(plane, old_plane_state);
8854 }
8855
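/*
 * Program all non-cursor planes for one CRTC: build a dc_surface_update
 * bundle, wait (with a timeout) on the framebuffer fences, throttle the
 * flip to the target vblank, arm the pageflip event, and finally hand the
 * bundle to DC with dc_commit_updates_for_stream(). Cursor updates are
 * issued separately, before the planes are disabled or after they are
 * programmed, as noted inline.
 */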
8856 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8857                                     struct dc_state *dc_state,
8858                                     struct drm_device *dev,
8859                                     struct amdgpu_display_manager *dm,
8860                                     struct drm_crtc *pcrtc,
8861                                     bool wait_for_vblank)
8862 {
8863         uint32_t i;
8864         uint64_t timestamp_ns;
8865         struct drm_plane *plane;
8866         struct drm_plane_state *old_plane_state, *new_plane_state;
8867         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8868         struct drm_crtc_state *new_pcrtc_state =
8869                         drm_atomic_get_new_crtc_state(state, pcrtc);
8870         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8871         struct dm_crtc_state *dm_old_crtc_state =
8872                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8873         int planes_count = 0, vpos, hpos;
8874         long r;
8875         unsigned long flags;
8876         struct amdgpu_bo *abo;
8877         uint32_t target_vblank, last_flip_vblank;
8878         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8879         bool pflip_present = false;
8880         struct {
8881                 struct dc_surface_update surface_updates[MAX_SURFACES];
8882                 struct dc_plane_info plane_infos[MAX_SURFACES];
8883                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8884                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8885                 struct dc_stream_update stream_update;
8886         } *bundle;
8887
8888         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8889
8890         if (!bundle) {
8891                 dm_error("Failed to allocate update bundle\n");
8892                 goto cleanup;
8893         }
8894
8895         /*
8896          * Disable the cursor first if we're disabling all the planes.
8897          * It'll remain on the screen after the planes are re-enabled
8898          * if we don't.
8899          */
8900         if (acrtc_state->active_planes == 0)
8901                 amdgpu_dm_commit_cursors(state);
8902
8903         /* update planes when needed */
8904         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8905                 struct drm_crtc *crtc = new_plane_state->crtc;
8906                 struct drm_crtc_state *new_crtc_state;
8907                 struct drm_framebuffer *fb = new_plane_state->fb;
8908                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8909                 bool plane_needs_flip;
8910                 struct dc_plane_state *dc_plane;
8911                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8912
8913                 /* Cursor plane is handled after stream updates */
8914                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8915                         continue;
8916
8917                 if (!fb || !crtc || pcrtc != crtc)
8918                         continue;
8919
8920                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8921                 if (!new_crtc_state->active)
8922                         continue;
8923
8924                 dc_plane = dm_new_plane_state->dc_state;
8925
8926                 bundle->surface_updates[planes_count].surface = dc_plane;
8927                 if (new_pcrtc_state->color_mgmt_changed) {
8928                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8929                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8930                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8931                 }
8932
8933                 fill_dc_scaling_info(new_plane_state,
8934                                      &bundle->scaling_infos[planes_count]);
8935
8936                 bundle->surface_updates[planes_count].scaling_info =
8937                         &bundle->scaling_infos[planes_count];
8938
8939                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8940
8941                 pflip_present = pflip_present || plane_needs_flip;
8942
8943                 if (!plane_needs_flip) {
8944                         planes_count += 1;
8945                         continue;
8946                 }
8947
8948                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8949
8950                 /*
8951                  * Wait for all fences on this FB. Do limited wait to avoid
8952                  * deadlock during GPU reset when this fence will not signal
8953                  * but we hold reservation lock for the BO.
8954                  */
8955                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8956                                           msecs_to_jiffies(5000));
8957                 if (unlikely(r <= 0))
8958                         DRM_ERROR("Waiting for fences timed out!");
8959
8960                 fill_dc_plane_info_and_addr(
8961                         dm->adev, new_plane_state,
8962                         afb->tiling_flags,
8963                         &bundle->plane_infos[planes_count],
8964                         &bundle->flip_addrs[planes_count].address,
8965                         afb->tmz_surface, false);
8966
8967                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8968                                  new_plane_state->plane->index,
8969                                  bundle->plane_infos[planes_count].dcc.enable);
8970
8971                 bundle->surface_updates[planes_count].plane_info =
8972                         &bundle->plane_infos[planes_count];
8973
8974                 /*
8975                  * Only allow immediate flips for fast updates that don't
8976                  * change FB pitch, DCC state, rotation or mirroring.
8977                  */
8978                 bundle->flip_addrs[planes_count].flip_immediate =
8979                         crtc->state->async_flip &&
8980                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8981
8982                 timestamp_ns = ktime_get_ns();
8983                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8984                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8985                 bundle->surface_updates[planes_count].surface = dc_plane;
8986
8987                 if (!bundle->surface_updates[planes_count].surface) {
8988                         DRM_ERROR("No surface for CRTC: id=%d\n",
8989                                         acrtc_attach->crtc_id);
8990                         continue;
8991                 }
8992
8993                 if (plane == pcrtc->primary)
8994                         update_freesync_state_on_stream(
8995                                 dm,
8996                                 acrtc_state,
8997                                 acrtc_state->stream,
8998                                 dc_plane,
8999                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9000
9001                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9002                                  __func__,
9003                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9004                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9005
9006                 planes_count += 1;
9007
9008         }
9009
9010         if (pflip_present) {
9011                 if (!vrr_active) {
9012                         /* Use old throttling in non-vrr fixed refresh rate mode
9013                          * to keep flip scheduling based on target vblank counts
9014                          * working in a backwards compatible way, e.g., for
9015                          * clients using the GLX_OML_sync_control extension or
9016                          * DRI3/Present extension with defined target_msc.
9017                          */
9018                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9019                 } else {
9021                         /* For variable refresh rate mode only:
9022                          * Get vblank of last completed flip to avoid > 1 vrr
9023                          * flips per video frame by use of throttling, but allow
9024                          * flip programming anywhere in the possibly large
9025                          * variable vrr vblank interval for fine-grained flip
9026                          * timing control and more opportunity to avoid stutter
9027                          * on late submission of flips.
9028                          */
9029                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9030                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9031                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9032                 }
9033
9034                 target_vblank = last_flip_vblank + wait_for_vblank;
9035
9036                 /*
9037                  * Wait until we're out of the vertical blank period before the one
9038                  * targeted by the flip
9039                  */
9040                 while ((acrtc_attach->enabled &&
9041                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9042                                                             0, &vpos, &hpos, NULL,
9043                                                             NULL, &pcrtc->hwmode)
9044                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9045                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9046                         (int)(target_vblank -
9047                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9048                         usleep_range(1000, 1100);
9049                 }
9050
9051                 /*
9052                  * Prepare the flip event for the pageflip interrupt to handle.
9053                  *
9054                  * This only works in the case where we've already turned on the
9055                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9056                  * from 0 -> n planes we have to skip a hardware generated event
9057                  * and rely on sending it from software.
9058                  */
9059                 if (acrtc_attach->base.state->event &&
9060                     acrtc_state->active_planes > 0 &&
9061                     !acrtc_state->force_dpms_off) {
9062                         drm_crtc_vblank_get(pcrtc);
9063
9064                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9065
9066                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9067                         prepare_flip_isr(acrtc_attach);
9068
9069                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9070                 }
9071
9072                 if (acrtc_state->stream) {
9073                         if (acrtc_state->freesync_vrr_info_changed)
9074                                 bundle->stream_update.vrr_infopacket =
9075                                         &acrtc_state->stream->vrr_infopacket;
9076                 }
9077         }
9078
9079         /* Update the planes if changed or disable if we don't have any. */
9080         if ((planes_count || acrtc_state->active_planes == 0) &&
9081                 acrtc_state->stream) {
9082 #if defined(CONFIG_DRM_AMD_DC_DCN)
9083                 /*
9084                  * If PSR or idle optimizations are enabled then flush out
9085                  * any pending work before hardware programming.
9086                  */
9087                 if (dm->vblank_control_workqueue)
9088                         flush_workqueue(dm->vblank_control_workqueue);
9089 #endif
9090
9091                 bundle->stream_update.stream = acrtc_state->stream;
9092                 if (new_pcrtc_state->mode_changed) {
9093                         bundle->stream_update.src = acrtc_state->stream->src;
9094                         bundle->stream_update.dst = acrtc_state->stream->dst;
9095                 }
9096
9097                 if (new_pcrtc_state->color_mgmt_changed) {
9098                         /*
9099                          * TODO: This isn't fully correct since we've actually
9100                          * already modified the stream in place.
9101                          */
9102                         bundle->stream_update.gamut_remap =
9103                                 &acrtc_state->stream->gamut_remap_matrix;
9104                         bundle->stream_update.output_csc_transform =
9105                                 &acrtc_state->stream->csc_color_matrix;
9106                         bundle->stream_update.out_transfer_func =
9107                                 acrtc_state->stream->out_transfer_func;
9108                 }
9109
9110                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9111                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9112                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9113
9114                 /*
9115                  * If FreeSync state on the stream has changed then we need to
9116                  * re-adjust the min/max bounds now that DC doesn't handle this
9117                  * as part of commit.
9118                  */
9119                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9120                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9121                         dc_stream_adjust_vmin_vmax(
9122                                 dm->dc, acrtc_state->stream,
9123                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9124                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9125                 }
9126                 mutex_lock(&dm->dc_lock);
9127                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9128                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9129                         amdgpu_dm_psr_disable(acrtc_state->stream);
9130
9131                 dc_commit_updates_for_stream(dm->dc,
9132                                                      bundle->surface_updates,
9133                                                      planes_count,
9134                                                      acrtc_state->stream,
9135                                                      &bundle->stream_update,
9136                                                      dc_state);
9137
9138                 /*
9139                  * Enable or disable the interrupts on the backend.
9140                  *
9141                  * Most pipes are put into power gating when unused.
9142                  *
9143                  * When power gating is enabled on a pipe we lose the
9144                  * interrupt enablement state when power gating is disabled.
9145                  *
9146                  * So we need to update the IRQ control state in hardware
9147                  * whenever the pipe turns on (since it could be previously
9148                  * power gated) or off (since some pipes can't be power gated
9149                  * on some ASICs).
9150                  */
9151                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9152                         dm_update_pflip_irq_state(drm_to_adev(dev),
9153                                                   acrtc_attach);
9154
9155                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9156                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9157                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9158                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9159
9160                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9161                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9162                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9163                         struct amdgpu_dm_connector *aconn =
9164                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9165
9166                         if (aconn->psr_skip_count > 0)
9167                                 aconn->psr_skip_count--;
9168
9169                         /* Allow PSR when skip count is 0. */
9170                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9171                 } else {
9172                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9173                 }
9174
9175                 mutex_unlock(&dm->dc_lock);
9176         }
9177
9178         /*
9179          * Update cursor state *after* programming all the planes.
9180          * This avoids redundant programming in the case where we're going
9181          * to be disabling a single plane - those pipes are being disabled.
9182          */
9183         if (acrtc_state->active_planes)
9184                 amdgpu_dm_commit_cursors(state);
9185
9186 cleanup:
9187         kfree(bundle);
9188 }
9189
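/*
 * Walk the connector states and notify the audio component (via ELD) of
 * displays that were removed or moved first, then of the ones that were
 * added, using the audio instance reported by the new stream's status.
 */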
9190 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9191                                    struct drm_atomic_state *state)
9192 {
9193         struct amdgpu_device *adev = drm_to_adev(dev);
9194         struct amdgpu_dm_connector *aconnector;
9195         struct drm_connector *connector;
9196         struct drm_connector_state *old_con_state, *new_con_state;
9197         struct drm_crtc_state *new_crtc_state;
9198         struct dm_crtc_state *new_dm_crtc_state;
9199         const struct dc_stream_status *status;
9200         int i, inst;
9201
9202         /* Notify device removals. */
9203         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9204                 if (old_con_state->crtc != new_con_state->crtc) {
9205                         /* CRTC changes require notification. */
9206                         goto notify;
9207                 }
9208
9209                 if (!new_con_state->crtc)
9210                         continue;
9211
9212                 new_crtc_state = drm_atomic_get_new_crtc_state(
9213                         state, new_con_state->crtc);
9214
9215                 if (!new_crtc_state)
9216                         continue;
9217
9218                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9219                         continue;
9220
9221         notify:
9222                 aconnector = to_amdgpu_dm_connector(connector);
9223
9224                 mutex_lock(&adev->dm.audio_lock);
9225                 inst = aconnector->audio_inst;
9226                 aconnector->audio_inst = -1;
9227                 mutex_unlock(&adev->dm.audio_lock);
9228
9229                 amdgpu_dm_audio_eld_notify(adev, inst);
9230         }
9231
9232         /* Notify audio device additions. */
9233         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9234                 if (!new_con_state->crtc)
9235                         continue;
9236
9237                 new_crtc_state = drm_atomic_get_new_crtc_state(
9238                         state, new_con_state->crtc);
9239
9240                 if (!new_crtc_state)
9241                         continue;
9242
9243                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9244                         continue;
9245
9246                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9247                 if (!new_dm_crtc_state->stream)
9248                         continue;
9249
9250                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9251                 if (!status)
9252                         continue;
9253
9254                 aconnector = to_amdgpu_dm_connector(connector);
9255
9256                 mutex_lock(&adev->dm.audio_lock);
9257                 inst = status->audio_inst;
9258                 aconnector->audio_inst = inst;
9259                 mutex_unlock(&adev->dm.audio_lock);
9260
9261                 amdgpu_dm_audio_eld_notify(adev, inst);
9262         }
9263 }
9264
9265 /*
9266  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9267  * @crtc_state: the DRM CRTC state
9268  * @stream_state: the DC stream state.
9269  *
9270  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9271  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9272  */
9273 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9274                                                 struct dc_stream_state *stream_state)
9275 {
9276         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9277 }
9278
9279 /**
9280  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9281  * @state: The atomic state to commit
9282  *
9283  * This will tell DC to commit the constructed DC state from atomic_check,
9284  * programming the hardware. Any failure here implies a hardware failure, since
9285  * atomic check should have filtered anything non-kosher.
9286  */
9287 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9288 {
9289         struct drm_device *dev = state->dev;
9290         struct amdgpu_device *adev = drm_to_adev(dev);
9291         struct amdgpu_display_manager *dm = &adev->dm;
9292         struct dm_atomic_state *dm_state;
9293         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9294         uint32_t i, j;
9295         struct drm_crtc *crtc;
9296         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9297         unsigned long flags;
9298         bool wait_for_vblank = true;
9299         struct drm_connector *connector;
9300         struct drm_connector_state *old_con_state, *new_con_state;
9301         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9302         int crtc_disable_count = 0;
9303         bool mode_set_reset_required = false;
9304
9305         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9306
9307         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9308
9309         dm_state = dm_atomic_get_new_state(state);
9310         if (dm_state && dm_state->context) {
9311                 dc_state = dm_state->context;
9312         } else {
9313                 /* No state changes, retain current state. */
9314                 dc_state_temp = dc_create_state(dm->dc);
9315                 ASSERT(dc_state_temp);
9316                 dc_state = dc_state_temp;
9317                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9318         }
9319
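             /*
              * Disable interrupts and drop the stream reference for any CRTC
              * that is being turned off or undergoing a full modeset, before
              * DC reprograms the hardware below.
              */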
9320         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9321                                        new_crtc_state, i) {
9322                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9323
9324                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9325
9326                 if (old_crtc_state->active &&
9327                     (!new_crtc_state->active ||
9328                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9329                         manage_dm_interrupts(adev, acrtc, false);
9330                         dc_stream_release(dm_old_crtc_state->stream);
9331                 }
9332         }
9333
9334         drm_atomic_helper_calc_timestamping_constants(state);
9335
9336         /* update changed items */
9337         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9338                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9339
9340                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9341                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9342
9343                 DRM_DEBUG_ATOMIC(
9344                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9345                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9346                         "connectors_changed:%d\n",
9347                         acrtc->crtc_id,
9348                         new_crtc_state->enable,
9349                         new_crtc_state->active,
9350                         new_crtc_state->planes_changed,
9351                         new_crtc_state->mode_changed,
9352                         new_crtc_state->active_changed,
9353                         new_crtc_state->connectors_changed);
9354
9355                 /* Disable cursor if disabling crtc */
9356                 if (old_crtc_state->active && !new_crtc_state->active) {
9357                         struct dc_cursor_position position;
9358
9359                         memset(&position, 0, sizeof(position));
9360                         mutex_lock(&dm->dc_lock);
9361                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9362                         mutex_unlock(&dm->dc_lock);
9363                 }
9364
9365                 /* Copy all transient state flags into dc state */
9366                 if (dm_new_crtc_state->stream) {
9367                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9368                                                             dm_new_crtc_state->stream);
9369                 }
9370
9371                 /* Handle the headless hotplug case, updating new_state
9372                  * and aconnector as needed.
9373                  */
9374
9375                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9376
9377                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9378
9379                         if (!dm_new_crtc_state->stream) {
9380                                 /*
9381                                  * This could happen because of issues with
9382                                  * userspace notification delivery; userspace
9383                                  * tries to set a mode on a display that is in
9384                                  * fact disconnected, so dc_sink is NULL on the
9385                                  * aconnector. We expect a mode reset to come
9386                                  * soon.
9387                                  *
9388                                  * This can also happen when an unplug occurs
9389                                  * during the resume sequence.
9390                                  *
9391                                  * In either case, pretend we still have a sink
9392                                  * to keep the pipe running so that the hw state
9393                                  * stays consistent with the sw state.
9394                                  */
9395                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9396                                                 __func__, acrtc->base.base.id);
9397                                 continue;
9398                         }
9399
9400                         if (dm_old_crtc_state->stream)
9401                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9402
9403                         pm_runtime_get_noresume(dev->dev);
9404
9405                         acrtc->enabled = true;
9406                         acrtc->hw_mode = new_crtc_state->mode;
9407                         crtc->hwmode = new_crtc_state->mode;
9408                         mode_set_reset_required = true;
9409                 } else if (modereset_required(new_crtc_state)) {
9410                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9411                         /* i.e. reset mode */
9412                         if (dm_old_crtc_state->stream)
9413                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9414
9415                         mode_set_reset_required = true;
9416                 }
9417         } /* for_each_oldnew_crtc_in_state() */
9418
9419         if (dc_state) {
9420                 /* If there is a mode set or reset, disable eDP PSR */
9421                 if (mode_set_reset_required) {
9422 #if defined(CONFIG_DRM_AMD_DC_DCN)
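                             /*
                              * The vblank control work can itself enable or
                              * disable PSR; flush it first so a queued PSR
                              * enable cannot race with the disable below.
                              */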
9423                         if (dm->vblank_control_workqueue)
9424                                 flush_workqueue(dm->vblank_control_workqueue);
9425 #endif
9426                         amdgpu_dm_psr_disable_all(dm);
9427                 }
9428
9429                 dm_enable_per_frame_crtc_master_sync(dc_state);
9430                 mutex_lock(&dm->dc_lock);
9431                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9432 #if defined(CONFIG_DRM_AMD_DC_DCN)
9433                 /* Allow idle optimization when vblank count is 0 for display off */
9434                 if (dm->active_vblank_irq_count == 0)
9435                         dc_allow_idle_optimizations(dm->dc, true);
9436 #endif
9437                 mutex_unlock(&dm->dc_lock);
9438         }
9439
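             /*
              * Record which OTG instance DC assigned to each enabled stream,
              * so interrupt handling can map the OTG back to its CRTC.
              */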
9440         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9441                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9442
9443                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9444
9445                 if (dm_new_crtc_state->stream != NULL) {
9446                         const struct dc_stream_status *status =
9447                                         dc_stream_get_status(dm_new_crtc_state->stream);
9448
9449                         if (!status)
9450                                 status = dc_stream_get_status_from_state(dc_state,
9451                                                                          dm_new_crtc_state->stream);
9452                         if (!status)
9453                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9454                         else
9455                                 acrtc->otg_inst = status->primary_otg_inst;
9456                 }
9457         }
9458 #ifdef CONFIG_DRM_AMD_DC_HDCP
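             /*
              * Re-evaluate HDCP for each connector: reset the display's HDCP
              * state when its stream went away while content protection was
              * enabled, otherwise push any changed protection settings to the
              * HDCP workqueue.
              */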
9459         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9460                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9461                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9462                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9463
9464                 new_crtc_state = NULL;
9465
9466                 if (acrtc)
9467                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9468
9469                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9470
9471                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9472                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9473                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9474                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9475                         dm_new_con_state->update_hdcp = true;
9476                         continue;
9477                 }
9478
9479                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9480                         hdcp_update_display(
9481                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9482                                 new_con_state->hdcp_content_type,
9483                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9484         }
9485 #endif
9486
9487         /* Handle connector state changes */
9488         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9489                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9490                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9491                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9492                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9493                 struct dc_stream_update stream_update;
9494                 struct dc_info_packet hdr_packet;
9495                 struct dc_stream_status *status = NULL;
9496                 bool abm_changed, hdr_changed, scaling_changed;
9497
9498                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9499                 memset(&stream_update, 0, sizeof(stream_update));
9500
9501                 if (acrtc) {
9502                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9503                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9504                 }
9505
9506                 /* Skip any modesets/resets */
9507                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9508                         continue;
9509
9510                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9511                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9512
9513                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9514                                                              dm_old_con_state);
9515
9516                 abm_changed = dm_new_crtc_state->abm_level !=
9517                               dm_old_crtc_state->abm_level;
9518
9519                 hdr_changed =
9520                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9521
9522                 if (!scaling_changed && !abm_changed && !hdr_changed)
9523                         continue;
9524
9525                 stream_update.stream = dm_new_crtc_state->stream;
9526                 if (scaling_changed) {
9527                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9528                                         dm_new_con_state, dm_new_crtc_state->stream);
9529
9530                         stream_update.src = dm_new_crtc_state->stream->src;
9531                         stream_update.dst = dm_new_crtc_state->stream->dst;
9532                 }
9533
9534                 if (abm_changed) {
9535                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9536
9537                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9538                 }
9539
9540                 if (hdr_changed) {
9541                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9542                         stream_update.hdr_static_metadata = &hdr_packet;
9543                 }
9544
9545                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9546
9547                 if (WARN_ON(!status))
9548                         continue;
9549
9550                 WARN_ON(!status->plane_count);
9551
9552                 /*
9553                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9554                  * Here we create an empty update on each plane.
9555                  * To fix this, DC should permit updating only stream properties.
9556                  */
9557                 for (j = 0; j < status->plane_count; j++)
9558                         dummy_updates[j].surface = status->plane_states[0];
9559
9561                 mutex_lock(&dm->dc_lock);
9562                 dc_commit_updates_for_stream(dm->dc,
9563                                                      dummy_updates,
9564                                                      status->plane_count,
9565                                                      dm_new_crtc_state->stream,
9566                                                      &stream_update,
9567                                                      dc_state);
9568                 mutex_unlock(&dm->dc_lock);
9569         }
9570
9571         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9572         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9573                                       new_crtc_state, i) {
9574                 if (old_crtc_state->active && !new_crtc_state->active)
9575                         crtc_disable_count++;
9576
9577                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9578                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9579
9580                 /* Update the crtc state's freesync config and the params used by the irq handler */
9581                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9582
9583                 /* Handle vrr on->off / off->on transitions */
9584                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9585                                                 dm_new_crtc_state);
9586         }
9587
9588         /*
9589          * Enable interrupts for CRTCs that are newly enabled or went through
9590          * a modeset. This is intentionally deferred until after the front end
9591          * state has been modified, so that the OTG is already on and the IRQ
9592          * handlers never see stale or invalid state.
9593          */
9594         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9595                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9596 #ifdef CONFIG_DEBUG_FS
9597                 bool configure_crc = false;
9598                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9599 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9600                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9601 #endif
9602                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9603                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9604                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9605 #endif
9606                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9607
9608                 if (new_crtc_state->active &&
9609                     (!old_crtc_state->active ||
9610                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9611                         dc_stream_retain(dm_new_crtc_state->stream);
9612                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9613                         manage_dm_interrupts(adev, acrtc, true);
9614
9615 #ifdef CONFIG_DEBUG_FS
9616                         /*
9617                          * The front end may have changed, so reapply the CRC
9618                          * capture settings for the stream.
9619                          */
9622                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9623                                 configure_crc = true;
9624 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9625                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9626                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9627                                         acrtc->dm_irq_params.crc_window.update_win = true;
9628                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9629                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9630                                         crc_rd_wrk->crtc = crtc;
9631                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9632                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9633                                 }
9634 #endif
9635                         }
9636
9637                         if (configure_crc)
9638                                 if (amdgpu_dm_crtc_configure_crc_source(
9639                                         crtc, dm_new_crtc_state, cur_crc_src))
9640                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9641 #endif
9642                 }
9643         }
9644
9645         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9646                 if (new_crtc_state->async_flip)
9647                         wait_for_vblank = false;
9648
9649         /* Update planes when needed, per CRTC */
9650         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9651                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9652
9653                 if (dm_new_crtc_state->stream)
9654                         amdgpu_dm_commit_planes(state, dc_state, dev,
9655                                                 dm, crtc, wait_for_vblank);
9656         }
9657
9658         /* Update audio instances for each connector. */
9659         amdgpu_dm_commit_audio(dev, state);
9660
9661 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9662         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9663         /* restore the backlight level */
9664         for (i = 0; i < dm->num_of_edps; i++) {
9665                 if (dm->backlight_dev[i] &&
9666                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9667                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9668         }
9669 #endif
9670         /*
9671          * Send a vblank event for every CRTC event not handled in the flip
9672          * path, and mark it consumed for drm_atomic_helper_commit_hw_done().
9673          */
9674         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9675         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9676
9677                 if (new_crtc_state->event)
9678                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9679
9680                 new_crtc_state->event = NULL;
9681         }
9682         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9683
9684         /* Signal HW programming completion */
9685         drm_atomic_helper_commit_hw_done(state);
9686
9687         if (wait_for_vblank)
9688                 drm_atomic_helper_wait_for_flip_done(dev, state);
9689
9690         drm_atomic_helper_cleanup_planes(dev, state);
9691
9692         /* Return the stolen VGA memory back to VRAM */
9693         if (!adev->mman.keep_stolen_vga_memory)
9694                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9695         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9696
9697         /*
9698          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9699          * so we can put the GPU into runtime suspend if we're not driving any
9700          * displays anymore
9701          */
9702         for (i = 0; i < crtc_disable_count; i++)
9703                 pm_runtime_put_autosuspend(dev->dev);
9704         pm_runtime_mark_last_busy(dev->dev);
9705
9706         if (dc_state_temp)
9707                 dc_release_state(dc_state_temp);
9708 }
9709
9711 static int dm_force_atomic_commit(struct drm_connector *connector)
9712 {
9713         int ret = 0;
9714         struct drm_device *ddev = connector->dev;
9715         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9716         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9717         struct drm_plane *plane = disconnected_acrtc->base.primary;
9718         struct drm_connector_state *conn_state;
9719         struct drm_crtc_state *crtc_state;
9720         struct drm_plane_state *plane_state;
9721
9722         if (!state)
9723                 return -ENOMEM;
9724
9725         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9726
9727         /* Construct an atomic state to restore previous display setting */
9728
9729         /* Attach connectors to drm_atomic_state */
9732         conn_state = drm_atomic_get_connector_state(state, connector);
9733
9734         ret = PTR_ERR_OR_ZERO(conn_state);
9735         if (ret)
9736                 goto out;
9737
9738         /* Attach crtc to drm_atomic_state */
9739         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9740
9741         ret = PTR_ERR_OR_ZERO(crtc_state);
9742         if (ret)
9743                 goto out;
9744
9745         /* force a restore */
9746         crtc_state->mode_changed = true;
9747
9748         /* Attach plane to drm_atomic_state */
9749         plane_state = drm_atomic_get_plane_state(state, plane);
9750
9751         ret = PTR_ERR_OR_ZERO(plane_state);
9752         if (ret)
9753                 goto out;
9754
9755         /* Call commit internally with the state we just constructed */
9756         ret = drm_atomic_commit(state);
9757
9758 out:
9759         drm_atomic_state_put(state);
9760         if (ret)
9761                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9762
9763         return ret;
9764 }
9765
9766 /*
9767  * This function handles all cases when a set mode does not come upon hotplug.
9768  * This includes when a display is unplugged then plugged back into the
9769  * same port, and when running without a usermode desktop manager.
9770  */
9771 void dm_restore_drm_connector_state(struct drm_device *dev,
9772                                     struct drm_connector *connector)
9773 {
9774         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9775         struct amdgpu_crtc *disconnected_acrtc;
9776         struct dm_crtc_state *acrtc_state;
9777
9778         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9779                 return;
9780
9781         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9782         if (!disconnected_acrtc)
9783                 return;
9784
9785         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9786         if (!acrtc_state->stream)
9787                 return;
9788
9789         /*
9790          * If the previous sink is not released and is different from the
9791          * current one, we deduce that we cannot rely on a usermode call to
9792          * turn on the display, so we do it here.
9793          */
9794         if (acrtc_state->stream->sink != aconnector->dc_sink)
9795                 dm_force_atomic_commit(&aconnector->base);
9796 }
9797
9798 /*
9799  * Grabs all modesetting locks to serialize against any blocking commits,
9800  * and waits for completion of all non-blocking commits.
9801  */
9802 static int do_aquire_global_lock(struct drm_device *dev,
9803                                  struct drm_atomic_state *state)
9804 {
9805         struct drm_crtc *crtc;
9806         struct drm_crtc_commit *commit;
9807         long ret;
9808
9809         /*
9810          * Adding all modeset locks to acquire_ctx will ensure that when the
9811          * framework releases it, the extra locks we are locking here will
9812          * get released too.
9813          */
9814         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9815         if (ret)
9816                 return ret;
9817
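             /*
              * Take a reference to the newest pending commit on each CRTC
              * under commit_lock, then wait for its hw_done and flip_done
              * completions outside the lock.
              */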
9818         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9819                 spin_lock(&crtc->commit_lock);
9820                 commit = list_first_entry_or_null(&crtc->commit_list,
9821                                 struct drm_crtc_commit, commit_entry);
9822                 if (commit)
9823                         drm_crtc_commit_get(commit);
9824                 spin_unlock(&crtc->commit_lock);
9825
9826                 if (!commit)
9827                         continue;
9828
9829                 /*
9830                  * Make sure all pending HW programming has completed and
9831                  * all page flips are done.
9832                  */
9833                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9834
9835                 if (ret > 0)
9836                         ret = wait_for_completion_interruptible_timeout(
9837                                         &commit->flip_done, 10*HZ);
9838
9839                 if (ret == 0)
9840                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9841                                   crtc->base.id, crtc->name);
9842
9843                 drm_crtc_commit_put(commit);
9844         }
9845
9846         return ret < 0 ? ret : 0;
9847 }
9848
9849 static void get_freesync_config_for_crtc(
9850         struct dm_crtc_state *new_crtc_state,
9851         struct dm_connector_state *new_con_state)
9852 {
9853         struct mod_freesync_config config = {0};
9854         struct amdgpu_dm_connector *aconnector =
9855                         to_amdgpu_dm_connector(new_con_state->base.connector);
9856         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9857         int vrefresh = drm_mode_vrefresh(mode);
9858         bool fs_vid_mode = false;
9859
9860         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9861                                         vrefresh >= aconnector->min_vfreq &&
9862                                         vrefresh <= aconnector->max_vfreq;
9863
9864         if (new_crtc_state->vrr_supported) {
9865                 new_crtc_state->stream->ignore_msa_timing_param = true;
9866                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9867
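                     /* DC expects refresh rates in uHz; connector limits are in Hz. */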
9868                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9869                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9870                 config.vsif_supported = true;
9871                 config.btr = true;
9872
9873                 if (fs_vid_mode) {
9874                         config.state = VRR_STATE_ACTIVE_FIXED;
9875                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9876                         goto out;
9877                 } else if (new_crtc_state->base.vrr_enabled) {
9878                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9879                 } else {
9880                         config.state = VRR_STATE_INACTIVE;
9881                 }
9882         }
9883 out:
9884         new_crtc_state->freesync_config = config;
9885 }
9886
9887 static void reset_freesync_config_for_crtc(
9888         struct dm_crtc_state *new_crtc_state)
9889 {
9890         new_crtc_state->vrr_supported = false;
9891
9892         memset(&new_crtc_state->vrr_infopacket, 0,
9893                sizeof(new_crtc_state->vrr_infopacket));
9894 }
9895
9896 static bool
9897 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9898                                  struct drm_crtc_state *new_crtc_state)
9899 {
9900         struct drm_display_mode old_mode, new_mode;
9901
9902         if (!old_crtc_state || !new_crtc_state)
9903                 return false;
9904
9905         old_mode = old_crtc_state->mode;
9906         new_mode = new_crtc_state->mode;
9907
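             /*
              * Match modes that differ only in their vertical blanking:
              * vtotal, vsync_start and vsync_end may all move, but the vsync
              * pulse width must stay the same. This is how freesync video
              * modes vary the refresh rate without a full modeset.
              */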
9908         if (old_mode.clock       == new_mode.clock &&
9909             old_mode.hdisplay    == new_mode.hdisplay &&
9910             old_mode.vdisplay    == new_mode.vdisplay &&
9911             old_mode.htotal      == new_mode.htotal &&
9912             old_mode.vtotal      != new_mode.vtotal &&
9913             old_mode.hsync_start == new_mode.hsync_start &&
9914             old_mode.vsync_start != new_mode.vsync_start &&
9915             old_mode.hsync_end   == new_mode.hsync_end &&
9916             old_mode.vsync_end   != new_mode.vsync_end &&
9917             old_mode.hskew       == new_mode.hskew &&
9918             old_mode.vscan       == new_mode.vscan &&
9919             (old_mode.vsync_end - old_mode.vsync_start) ==
9920             (new_mode.vsync_end - new_mode.vsync_start))
9921                 return true;
9922
9923         return false;
9924 }
9925
9926 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9927         uint64_t num, den, res;
9928         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9929
9930         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9931
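             /*
              * refresh (uHz) = pixel clock (kHz) * 1000 * 1000000 / (htotal * vtotal).
              * For example, a 1080p CEA mode with a 148500 kHz clock and
              * 2200x1125 total timing gives 148500000000000 / 2475000
              * = 60000000 uHz, i.e. 60 Hz.
              */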
9932         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9933         den = (unsigned long long)new_crtc_state->mode.htotal *
9934               (unsigned long long)new_crtc_state->mode.vtotal;
9935
9936         res = div_u64(num, den);
9937         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9938 }
9939
9940 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9941                                 struct drm_atomic_state *state,
9942                                 struct drm_crtc *crtc,
9943                                 struct drm_crtc_state *old_crtc_state,
9944                                 struct drm_crtc_state *new_crtc_state,
9945                                 bool enable,
9946                                 bool *lock_and_validation_needed)
9947 {
9948         struct dm_atomic_state *dm_state = NULL;
9949         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9950         struct dc_stream_state *new_stream;
9951         int ret = 0;
9952
9953         /*
9954          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9955          * update changed items
9956          */
9957         struct amdgpu_crtc *acrtc = NULL;
9958         struct amdgpu_dm_connector *aconnector = NULL;
9959         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9960         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9961
9962         new_stream = NULL;
9963
9964         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9965         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9966         acrtc = to_amdgpu_crtc(crtc);
9967         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9968
9969         /* TODO This hack should go away */
9970         if (aconnector && enable) {
9971                 /* Make sure fake sink is created in plug-in scenario */
9972                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9973                                                             &aconnector->base);
9974                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9975                                                             &aconnector->base);
9976
9977                 if (IS_ERR(drm_new_conn_state)) {
9978                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9979                         goto fail;
9980                 }
9981
9982                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9983                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9984
9985                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9986                         goto skip_modeset;
9987
9988                 new_stream = create_validate_stream_for_sink(aconnector,
9989                                                              &new_crtc_state->mode,
9990                                                              dm_new_conn_state,
9991                                                              dm_old_crtc_state->stream);
9992
9993                 /*
9994                  * We can have no stream on ACTION_SET if a display
9995                  * was disconnected during S3; in this case it is not an
9996                  * error, the OS will be updated after detection and
9997                  * will do the right thing on the next atomic commit.
9998                  */
9999
10000                 if (!new_stream) {
10001                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10002                                         __func__, acrtc->base.base.id);
10003                         ret = -ENOMEM;
10004                         goto fail;
10005                 }
10006
10007                 /*
10008                  * TODO: Check VSDB bits to decide whether this should
10009                  * be enabled or not.
10010                  */
10011                 new_stream->triggered_crtc_reset.enabled =
10012                         dm->force_timing_sync;
10013
10014                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10015
10016                 ret = fill_hdr_info_packet(drm_new_conn_state,
10017                                            &new_stream->hdr_static_metadata);
10018                 if (ret)
10019                         goto fail;
10020
10021                 /*
10022                  * If we already removed the old stream from the context
10023                  * (and set the new stream to NULL) then we can't reuse
10024                  * the old stream even if the stream and scaling are unchanged.
10025                  * We'll hit the BUG_ON and black screen.
10026                  *
10027                  * TODO: Refactor this function to allow this check to work
10028                  * in all conditions.
10029                  */
10030                 if (amdgpu_freesync_vid_mode &&
10031                     dm_new_crtc_state->stream &&
10032                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10033                         goto skip_modeset;
10034
10035                 if (dm_new_crtc_state->stream &&
10036                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10037                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10038                         new_crtc_state->mode_changed = false;
10039                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10040                                          new_crtc_state->mode_changed);
10041                 }
10042         }
10043
10044         /* mode_changed flag may get updated above, need to check again */
10045         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10046                 goto skip_modeset;
10047
10048         DRM_DEBUG_ATOMIC(
10049                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10050                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10051                 "connectors_changed:%d\n",
10052                 acrtc->crtc_id,
10053                 new_crtc_state->enable,
10054                 new_crtc_state->active,
10055                 new_crtc_state->planes_changed,
10056                 new_crtc_state->mode_changed,
10057                 new_crtc_state->active_changed,
10058                 new_crtc_state->connectors_changed);
10059
10060         /* Remove stream for any changed/disabled CRTC */
10061         if (!enable) {
10062
10063                 if (!dm_old_crtc_state->stream)
10064                         goto skip_modeset;
10065
10066                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10067                     is_timing_unchanged_for_freesync(new_crtc_state,
10068                                                      old_crtc_state)) {
10069                         new_crtc_state->mode_changed = false;
10070                         DRM_DEBUG_DRIVER(
10071                                 "Mode change not required for front porch change, "
10072                                 "setting mode_changed to %d",
10073                                 new_crtc_state->mode_changed);
10074
10075                         set_freesync_fixed_config(dm_new_crtc_state);
10076
10077                         goto skip_modeset;
10078                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10079                            is_freesync_video_mode(&new_crtc_state->mode,
10080                                                   aconnector)) {
10081                         struct drm_display_mode *high_mode;
10082
10083                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10084                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10085                                 set_freesync_fixed_config(dm_new_crtc_state);
10086                         }
10087                 }
10088
10089                 ret = dm_atomic_get_state(state, &dm_state);
10090                 if (ret)
10091                         goto fail;
10092
10093                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10094                                 crtc->base.id);
10095
10096                 /* i.e. reset mode */
10097                 if (dc_remove_stream_from_ctx(
10098                                 dm->dc,
10099                                 dm_state->context,
10100                                 dm_old_crtc_state->stream) != DC_OK) {
10101                         ret = -EINVAL;
10102                         goto fail;
10103                 }
10104
10105                 dc_stream_release(dm_old_crtc_state->stream);
10106                 dm_new_crtc_state->stream = NULL;
10107
10108                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10109
10110                 *lock_and_validation_needed = true;
10111
10112         } else {/* Add stream for any updated/enabled CRTC */
10113                 /*
10114                  * Quick fix to prevent a NULL pointer dereference on new_stream when
10115                  * added MST connectors are not found in the existing crtc_state in
10116                  * chained mode. TODO: need to dig out the root cause of that.
10117                  */
10118                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10119                         goto skip_modeset;
10120
10121                 if (modereset_required(new_crtc_state))
10122                         goto skip_modeset;
10123
10124                 if (modeset_required(new_crtc_state, new_stream,
10125                                      dm_old_crtc_state->stream)) {
10126
10127                         WARN_ON(dm_new_crtc_state->stream);
10128
10129                         ret = dm_atomic_get_state(state, &dm_state);
10130                         if (ret)
10131                                 goto fail;
10132
10133                         dm_new_crtc_state->stream = new_stream;
10134
10135                         dc_stream_retain(new_stream);
10136
10137                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10138                                          crtc->base.id);
10139
10140                         if (dc_add_stream_to_ctx(
10141                                         dm->dc,
10142                                         dm_state->context,
10143                                         dm_new_crtc_state->stream) != DC_OK) {
10144                                 ret = -EINVAL;
10145                                 goto fail;
10146                         }
10147
10148                         *lock_and_validation_needed = true;
10149                 }
10150         }
10151
10152 skip_modeset:
10153         /* Release extra reference */
10154         if (new_stream)
10155                 dc_stream_release(new_stream);
10156
10157         /*
10158          * We want to do dc stream updates that do not require a
10159          * full modeset below.
10160          */
10161         if (!(enable && aconnector && new_crtc_state->active))
10162                 return 0;
10163         /*
10164          * Given the above conditions, the dc stream state cannot be NULL because:
10165          * 1. We're in the process of enabling the CRTC (its stream has just
10166          *    been added to the dc context, or is already on it),
10167          * 2. it has a valid connector attached, and
10168          * 3. it is currently active and enabled.
10169          * => The dc stream state currently exists.
10170          */
10171         BUG_ON(dm_new_crtc_state->stream == NULL);
10172
10173         /* Scaling or underscan settings */
10174         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10175                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10176                 update_stream_scaling_settings(
10177                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10178
10179         /* ABM settings */
10180         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10181
10182         /*
10183          * Color management settings. We also update color properties
10184          * when a modeset is needed, to ensure it gets reprogrammed.
10185          */
10186         if (dm_new_crtc_state->base.color_mgmt_changed ||
10187             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10188                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10189                 if (ret)
10190                         goto fail;
10191         }
10192
10193         /* Update Freesync settings. */
10194         get_freesync_config_for_crtc(dm_new_crtc_state,
10195                                      dm_new_conn_state);
10196
10197         return ret;
10198
10199 fail:
10200         if (new_stream)
10201                 dc_stream_release(new_stream);
10202         return ret;
10203 }
10204
10205 static bool should_reset_plane(struct drm_atomic_state *state,
10206                                struct drm_plane *plane,
10207                                struct drm_plane_state *old_plane_state,
10208                                struct drm_plane_state *new_plane_state)
10209 {
10210         struct drm_plane *other;
10211         struct drm_plane_state *old_other_state, *new_other_state;
10212         struct drm_crtc_state *new_crtc_state;
10213         int i;
10214
10215         /*
10216          * TODO: Remove this hack once the checks below are sufficient
10217          * enough to determine when we need to reset all the planes on
10218          * to determine when we need to reset all the planes on
10219          */
10220         if (state->allow_modeset)
10221                 return true;
10222
10223         /* Exit early if we know that we're adding or removing the plane. */
10224         if (old_plane_state->crtc != new_plane_state->crtc)
10225                 return true;
10226
10227         /* old crtc == new_crtc == NULL, plane not in context. */
10228         if (!new_plane_state->crtc)
10229                 return false;
10230
10231         new_crtc_state =
10232                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10233
10234         if (!new_crtc_state)
10235                 return true;
10236
10237         /* CRTC Degamma changes currently require us to recreate planes. */
10238         if (new_crtc_state->color_mgmt_changed)
10239                 return true;
10240
10241         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10242                 return true;
10243
10244         /*
10245          * If there are any new primary or overlay planes being added or
10246          * removed then the z-order can potentially change. To ensure
10247          * correct z-order and pipe acquisition the current DC architecture
10248          * requires us to remove and recreate all existing planes.
10249          *
10250          * TODO: Come up with a more elegant solution for this.
10251          */
10252         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10253                 struct amdgpu_framebuffer *old_afb, *new_afb;
10254                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10255                         continue;
10256
10257                 if (old_other_state->crtc != new_plane_state->crtc &&
10258                     new_other_state->crtc != new_plane_state->crtc)
10259                         continue;
10260
10261                 if (old_other_state->crtc != new_other_state->crtc)
10262                         return true;
10263
10264                 /* Src/dst size and scaling updates. */
10265                 if (old_other_state->src_w != new_other_state->src_w ||
10266                     old_other_state->src_h != new_other_state->src_h ||
10267                     old_other_state->crtc_w != new_other_state->crtc_w ||
10268                     old_other_state->crtc_h != new_other_state->crtc_h)
10269                         return true;
10270
10271                 /* Rotation / mirroring updates. */
10272                 if (old_other_state->rotation != new_other_state->rotation)
10273                         return true;
10274
10275                 /* Blending updates. */
10276                 if (old_other_state->pixel_blend_mode !=
10277                     new_other_state->pixel_blend_mode)
10278                         return true;
10279
10280                 /* Alpha updates. */
10281                 if (old_other_state->alpha != new_other_state->alpha)
10282                         return true;
10283
10284                 /* Colorspace changes. */
10285                 if (old_other_state->color_range != new_other_state->color_range ||
10286                     old_other_state->color_encoding != new_other_state->color_encoding)
10287                         return true;
10288
10289                 /* Framebuffer checks fall at the end. */
10290                 if (!old_other_state->fb || !new_other_state->fb)
10291                         continue;
10292
10293                 /* Pixel format changes can require bandwidth updates. */
10294                 if (old_other_state->fb->format != new_other_state->fb->format)
10295                         return true;
10296
10297                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10298                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10299
10300                 /* Tiling and DCC changes also require bandwidth updates. */
10301                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10302                     old_afb->base.modifier != new_afb->base.modifier)
10303                         return true;
10304         }
10305
10306         return false;
10307 }
10308
10309 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10310                               struct drm_plane_state *new_plane_state,
10311                               struct drm_framebuffer *fb)
10312 {
10313         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10314         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10315         unsigned int pitch;
10316         bool linear;
10317
10318         if (fb->width > new_acrtc->max_cursor_width ||
10319             fb->height > new_acrtc->max_cursor_height) {
10320                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10321                                  fb->width, fb->height);
10323                 return -EINVAL;
10324         }
10325         if (new_plane_state->src_w != fb->width << 16 ||
10326             new_plane_state->src_h != fb->height << 16) {
10327                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10328                 return -EINVAL;
10329         }
10330
10331         /* Pitch in pixels */
10332         pitch = fb->pitches[0] / fb->format->cpp[0];
10333
10334         if (fb->width != pitch) {
10335                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10336                                  fb->width, pitch);
10337                 return -EINVAL;
10338         }
10339
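              /*
               * The cursor hardware supports only a fixed set of pitches; e.g.
               * a 64x64 ARGB8888 cursor has pitches[0] == 256 bytes and
               * cpp == 4, i.e. a pitch of 64 pixels.
               */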
10340         switch (pitch) {
10341         case 64:
10342         case 128:
10343         case 256:
10344                 /* FB pitch is supported by cursor plane */
10345                 break;
10346         default:
10347                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10348                 return -EINVAL;
10349         }
10350
10351         /* Core DRM takes care of checking FB modifiers, so we only need to
10352          * check tiling flags when the FB doesn't have a modifier. */
10353         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10354                 if (adev->family < AMDGPU_FAMILY_AI) {
10355                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10356                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10357                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10358                 } else {
10359                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10360                 }
10361                 if (!linear) {
10362                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10363                         return -EINVAL;
10364                 }
10365         }
10366
10367         return 0;
10368 }
10369
10370 static int dm_update_plane_state(struct dc *dc,
10371                                  struct drm_atomic_state *state,
10372                                  struct drm_plane *plane,
10373                                  struct drm_plane_state *old_plane_state,
10374                                  struct drm_plane_state *new_plane_state,
10375                                  bool enable,
10376                                  bool *lock_and_validation_needed)
10377 {
10379         struct dm_atomic_state *dm_state = NULL;
10380         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10381         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10382         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10383         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10384         struct amdgpu_crtc *new_acrtc;
10385         bool needs_reset;
10386         int ret = 0;
10387
10389         new_plane_crtc = new_plane_state->crtc;
10390         old_plane_crtc = old_plane_state->crtc;
10391         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10392         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10393
10394         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
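                      /*
                       * The cursor is not a DC plane: DC programs its position
                       * and attributes directly on the stream, so only basic
                       * position and framebuffer checks are needed here.
                       */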
10395                 if (!enable || !new_plane_crtc ||
10396                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10397                         return 0;
10398
10399                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10400
10401                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10402                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10403                         return -EINVAL;
10404                 }
10405
10406                 if (new_plane_state->fb) {
10407                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10408                                                  new_plane_state->fb);
10409                         if (ret)
10410                                 return ret;
10411                 }
10412
10413                 return 0;
10414         }
10415
10416         needs_reset = should_reset_plane(state, plane, old_plane_state,
10417                                          new_plane_state);
10418
10419         /* Remove any changed/removed planes */
10420         if (!enable) {
10421                 if (!needs_reset)
10422                         return 0;
10423
10424                 if (!old_plane_crtc)
10425                         return 0;
10426
10427                 old_crtc_state = drm_atomic_get_old_crtc_state(
10428                                 state, old_plane_crtc);
10429                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10430
10431                 if (!dm_old_crtc_state->stream)
10432                         return 0;
10433
10434                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10435                                 plane->base.id, old_plane_crtc->base.id);
10436
10437                 ret = dm_atomic_get_state(state, &dm_state);
10438                 if (ret)
10439                         return ret;
10440
10441                 if (!dc_remove_plane_from_context(
10442                                 dc,
10443                                 dm_old_crtc_state->stream,
10444                                 dm_old_plane_state->dc_state,
10445                                 dm_state->context)) {
10446
10447                         return -EINVAL;
10448                 }
10449
10451                 dc_plane_state_release(dm_old_plane_state->dc_state);
10452                 dm_new_plane_state->dc_state = NULL;
10453
10454                 *lock_and_validation_needed = true;
10455
10456         } else { /* Add new planes */
10457                 struct dc_plane_state *dc_new_plane_state;
10458
10459                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10460                         return 0;
10461
10462                 if (!new_plane_crtc)
10463                         return 0;
10464
10465                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10466                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10467
10468                 if (!dm_new_crtc_state->stream)
10469                         return 0;
10470
10471                 if (!needs_reset)
10472                         return 0;
10473
10474                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10475                 if (ret)
10476                         return ret;
10477
10478                 WARN_ON(dm_new_plane_state->dc_state);
10479
10480                 dc_new_plane_state = dc_create_plane_state(dc);
10481                 if (!dc_new_plane_state)
10482                         return -ENOMEM;
10483
10484                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10485                                  plane->base.id, new_plane_crtc->base.id);
10486
10487                 ret = fill_dc_plane_attributes(
10488                         drm_to_adev(new_plane_crtc->dev),
10489                         dc_new_plane_state,
10490                         new_plane_state,
10491                         new_crtc_state);
10492                 if (ret) {
10493                         dc_plane_state_release(dc_new_plane_state);
10494                         return ret;
10495                 }
10496
10497                 ret = dm_atomic_get_state(state, &dm_state);
10498                 if (ret) {
10499                         dc_plane_state_release(dc_new_plane_state);
10500                         return ret;
10501                 }
10502
10503                 /*
10504                  * Any atomic check errors that occur after this will
10505                  * not need a release. The plane state will be attached
10506                  * to the stream, and therefore part of the atomic
10507                  * state. It'll be released when the atomic state is
10508                  * cleaned.
10509                  */
10510                 if (!dc_add_plane_to_context(
10511                                 dc,
10512                                 dm_new_crtc_state->stream,
10513                                 dc_new_plane_state,
10514                                 dm_state->context)) {
10515
10516                         dc_plane_state_release(dc_new_plane_state);
10517                         return -EINVAL;
10518                 }
10519
10520                 dm_new_plane_state->dc_state = dc_new_plane_state;
10521
10522                 /* Tell DC to do a full surface update every time there
10523                  * is a plane change. Inefficient, but works for now.
10524                  */
10525                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10526
10527                 *lock_and_validation_needed = true;
10528         }
10529
10530
10531         return ret;
10532 }
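
The error handling above follows a common ownership pattern: dc_create_plane_state() hands back a referenced object, every failure path between creation and dc_add_plane_to_context() must release that reference, and once the plane state is attached to the stream the atomic-state teardown owns it. A minimal standalone sketch of the pattern, with hypothetical obj_*/container_* helpers standing in for the dc_plane_state calls:

#include <linux/errno.h>

struct obj;
struct obj *obj_create(void);            /* returns a referenced object */
void obj_release(struct obj *o);         /* drops one reference */
int obj_fill(struct obj *o);
int container_attach(struct obj *o);     /* on success, container owns the ref */

static int create_and_attach(void)
{
        struct obj *o = obj_create();
        int ret;

        if (!o)
                return -ENOMEM;

        ret = obj_fill(o);
        if (ret) {
                obj_release(o);          /* still ours: drop it */
                return ret;
        }

        ret = container_attach(o);
        if (ret) {
                obj_release(o);          /* attach failed: drop it */
                return ret;
        }

        /* From here on, the container's teardown releases the object. */
        return 0;
}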
10533
10534 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10535                                 struct drm_crtc *crtc,
10536                                 struct drm_crtc_state *new_crtc_state)
10537 {
10538         struct drm_plane_state *new_cursor_state, *new_primary_state;
10539         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10540
10541         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10542          * cursor per pipe, but it inherits the scaling and positioning from the
10543          * underlying pipe, so check that the cursor plane's scaling matches the
10544          * primary plane's. */
10545
10546         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10547         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10548         if (!new_cursor_state || !new_primary_state ||
10549             !new_cursor_state->fb || !new_primary_state->fb) {
10550                 return 0;
10551         }
10552
10553         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10554                          (new_cursor_state->src_w >> 16);
10555         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10556                          (new_cursor_state->src_h >> 16);
10557
10558         primary_scale_w = new_primary_state->crtc_w * 1000 /
10559                          (new_primary_state->src_w >> 16);
10560         primary_scale_h = new_primary_state->crtc_h * 1000 /
10561                          (new_primary_state->src_h >> 16);
10562
10563         if (cursor_scale_w != primary_scale_w ||
10564             cursor_scale_h != primary_scale_h) {
10565                 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10566                 return -EINVAL;
10567         }
10568
10569         return 0;
10570 }
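
The per-mille scale factors compared above come from DRM's 16.16 fixed-point source coordinates: src_w >> 16 recovers whole source pixels, and crtc_w * 1000 / src_pixels expresses the scale in 1/1000 units, so two planes scale identically exactly when the ratios match. A small userspace sketch with made-up values (plane_scale() is illustrative, not a driver helper, and like the driver code it assumes a source of at least one whole pixel):

#include <stdint.h>
#include <stdio.h>

/* Scale in 1/1000 units, mirroring dm_check_crtc_cursor() above. */
static int plane_scale(uint32_t crtc_size, uint32_t src_size_16_16)
{
        /* src sizes are 16.16 fixed point; >> 16 recovers whole pixels. */
        return crtc_size * 1000 / (src_size_16_16 >> 16);
}

int main(void)
{
        /* Hypothetical cursor: 64-pixel source shown 128 wide -> 2000 (2.0x). */
        int cursor_w = plane_scale(128, 64u << 16);
        /* Hypothetical primary: 1920-pixel source shown 1920 wide -> 1000 (1.0x). */
        int primary_w = plane_scale(1920, 1920u << 16);

        if (cursor_w != primary_w)
                printf("scaling mismatch: cursor %d vs primary %d\n",
                       cursor_w, primary_w);
        return 0;
}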
10571
10572 #if defined(CONFIG_DRM_AMD_DC_DCN)
10573 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10574 {
10575         struct drm_connector *connector;
10576         struct drm_connector_state *conn_state;
10577         struct amdgpu_dm_connector *aconnector = NULL;
10578         int i;
10579         for_each_new_connector_in_state(state, connector, conn_state, i) {
10580                 if (conn_state->crtc != crtc)
10581                         continue;
10582
10583                 aconnector = to_amdgpu_dm_connector(connector);
10584                 if (!aconnector->port || !aconnector->mst_port)
10585                         aconnector = NULL;
10586                 else
10587                         break;
10588         }
10589
10590         if (!aconnector)
10591                 return 0;
10592
10593         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10594 }
10595 #endif
10596
10597 static int validate_overlay(struct drm_atomic_state *state)
10598 {
10599         int i;
10600         struct drm_plane *plane;
10601         struct drm_plane_state *new_plane_state;
10602         struct drm_plane_state *primary_state, *overlay_state = NULL;
10603
10604         /* Check if primary plane is contained inside overlay */
10605         for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10606                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10607                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10608                                 return 0;
10609
10610                         overlay_state = new_plane_state;
10611                         continue;
10612                 }
10613         }
10614
10615         /* check if we're making changes to the overlay plane */
10616         if (!overlay_state)
10617                 return 0;
10618
10619         /* check if overlay plane is enabled */
10620         if (!overlay_state->crtc)
10621                 return 0;
10622
10623         /* find the primary plane for the CRTC that the overlay is enabled on */
10624         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10625         if (IS_ERR(primary_state))
10626                 return PTR_ERR(primary_state);
10627
10628         /* check if primary plane is enabled */
10629         if (!primary_state->crtc)
10630                 return 0;
10631
10632         /* Perform the bounds check to ensure the overlay plane covers the primary */
10633         if (primary_state->crtc_x < overlay_state->crtc_x ||
10634             primary_state->crtc_y < overlay_state->crtc_y ||
10635             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10636             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10637                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10638                 return -EINVAL;
10639         }
10640
10641         return 0;
10642 }
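
The bounds check above is plain rectangle containment: every edge of the primary must lie on or inside the corresponding edge of the overlay. The same test, restated standalone (struct rect is illustrative, not a DRM type):

#include <stdbool.h>

struct rect {
        int x, y, w, h;
};

/* True when inner lies fully within outer; validate_overlay() effectively
 * performs this with the overlay as outer and the primary as inner. */
static bool rect_contains(const struct rect *outer, const struct rect *inner)
{
        return inner->x >= outer->x &&
               inner->y >= outer->y &&
               inner->x + inner->w <= outer->x + outer->w &&
               inner->y + inner->h <= outer->y + outer->h;
}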
10643
10644 /**
10645  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10646  * @dev: The DRM device
10647  * @state: The atomic state to commit
10648  *
10649  * Validate that the given atomic state is programmable by DC into hardware.
10650  * This involves constructing a &struct dc_state reflecting the new hardware
10651  * state we wish to commit, then querying DC to see if it is programmable. It's
10652  * important not to modify the existing DC state. Otherwise, atomic_check
10653  * may unexpectedly commit hardware changes.
10654  *
10655  * When validating the DC state, it's important that the right locks are
10656  * acquired. For the full-update case, which removes/adds/updates streams on
10657  * one CRTC while flipping on another, acquiring the global lock guarantees
10658  * that any such full-update commit will wait for completion of any outstanding
10659  * flip using DRM's synchronization events.
10660  *
10661  * Note that DM adds the affected connectors for all CRTCs in the state, even
10662  * when that might not seem necessary. This is because DC stream creation
10663  * requires the DC sink, which is tied to the DRM connector state. Cleaning
10664  * this up should be possible but non-trivial - a possible TODO item.
10665  *
10666  * Return: 0 on success, negative error code on validation failure.
10667  */
10668 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10669                                   struct drm_atomic_state *state)
10670 {
10671         struct amdgpu_device *adev = drm_to_adev(dev);
10672         struct dm_atomic_state *dm_state = NULL;
10673         struct dc *dc = adev->dm.dc;
10674         struct drm_connector *connector;
10675         struct drm_connector_state *old_con_state, *new_con_state;
10676         struct drm_crtc *crtc;
10677         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10678         struct drm_plane *plane;
10679         struct drm_plane_state *old_plane_state, *new_plane_state;
10680         enum dc_status status;
10681         int ret, i;
10682         bool lock_and_validation_needed = false;
10683         struct dm_crtc_state *dm_old_crtc_state;
10684 #if defined(CONFIG_DRM_AMD_DC_DCN)
10685         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10686         struct drm_dp_mst_topology_state *mst_state;
10687         struct drm_dp_mst_topology_mgr *mgr;
10688 #endif
10689
10690         trace_amdgpu_dm_atomic_check_begin(state);
10691
10692         ret = drm_atomic_helper_check_modeset(dev, state);
10693         if (ret)
10694                 goto fail;
10695
10696         /* Check connector changes */
10697         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10698                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10699                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10700
10701                 /* Skip connectors that are disabled or part of modeset already. */
10702                 if (!old_con_state->crtc && !new_con_state->crtc)
10703                         continue;
10704
10705                 if (!new_con_state->crtc)
10706                         continue;
10707
10708                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10709                 if (IS_ERR(new_crtc_state)) {
10710                         ret = PTR_ERR(new_crtc_state);
10711                         goto fail;
10712                 }
10713
10714                 if (dm_old_con_state->abm_level !=
10715                     dm_new_con_state->abm_level)
10716                         new_crtc_state->connectors_changed = true;
10717         }
10718
10719 #if defined(CONFIG_DRM_AMD_DC_DCN)
10720         if (dc_resource_is_dsc_encoding_supported(dc)) {
10721                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10722                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10723                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10724                                 if (ret)
10725                                         goto fail;
10726                         }
10727                 }
10728         }
10729 #endif
10730         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10731                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10732
10733                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10734                     !new_crtc_state->color_mgmt_changed &&
10735                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10736                     !dm_old_crtc_state->dsc_force_changed)
10737                         continue;
10738
10739                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10740                 if (ret)
10741                         goto fail;
10742
10743                 if (!new_crtc_state->enable)
10744                         continue;
10745
10746                 ret = drm_atomic_add_affected_connectors(state, crtc);
10747                 if (ret)
10748                         goto fail;
10749
10750                 ret = drm_atomic_add_affected_planes(state, crtc);
10751                 if (ret)
10752                         goto fail;
10753
10754                 if (dm_old_crtc_state->dsc_force_changed)
10755                         new_crtc_state->mode_changed = true;
10756         }
10757
10758         /*
10759          * Add all primary and overlay planes on the CRTC to the state
10760          * whenever a plane is enabled to maintain correct z-ordering
10761          * and to enable fast surface updates.
10762          */
10763         drm_for_each_crtc(crtc, dev) {
10764                 bool modified = false;
10765
10766                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10767                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10768                                 continue;
10769
10770                         if (new_plane_state->crtc == crtc ||
10771                             old_plane_state->crtc == crtc) {
10772                                 modified = true;
10773                                 break;
10774                         }
10775                 }
10776
10777                 if (!modified)
10778                         continue;
10779
10780                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10781                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10782                                 continue;
10783
10784                         new_plane_state =
10785                                 drm_atomic_get_plane_state(state, plane);
10786
10787                         if (IS_ERR(new_plane_state)) {
10788                                 ret = PTR_ERR(new_plane_state);
10789                                 goto fail;
10790                         }
10791                 }
10792         }
10793
10794         /* Remove existing planes if they are modified */
10795         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10796                 ret = dm_update_plane_state(dc, state, plane,
10797                                             old_plane_state,
10798                                             new_plane_state,
10799                                             false,
10800                                             &lock_and_validation_needed);
10801                 if (ret)
10802                         goto fail;
10803         }
10804
10805         /* Disable all crtcs which require disable */
10806         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10807                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10808                                            old_crtc_state,
10809                                            new_crtc_state,
10810                                            false,
10811                                            &lock_and_validation_needed);
10812                 if (ret)
10813                         goto fail;
10814         }
10815
10816         /* Enable all crtcs which require enable */
10817         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10818                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10819                                            old_crtc_state,
10820                                            new_crtc_state,
10821                                            true,
10822                                            &lock_and_validation_needed);
10823                 if (ret)
10824                         goto fail;
10825         }
10826
10827         ret = validate_overlay(state);
10828         if (ret)
10829                 goto fail;
10830
10831         /* Add new/modified planes */
10832         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10833                 ret = dm_update_plane_state(dc, state, plane,
10834                                             old_plane_state,
10835                                             new_plane_state,
10836                                             true,
10837                                             &lock_and_validation_needed);
10838                 if (ret)
10839                         goto fail;
10840         }
10841
10842         /* Run this here since we want to validate the streams we created */
10843         ret = drm_atomic_helper_check_planes(dev, state);
10844         if (ret)
10845                 goto fail;
10846
10847         /* Check cursor planes scaling */
10848         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10849                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10850                 if (ret)
10851                         goto fail;
10852         }
10853
10854         if (state->legacy_cursor_update) {
10855                 /*
10856                  * This is a fast cursor update coming from the plane update
10857                  * helper, check if it can be done asynchronously for better
10858                  * performance.
10859                  */
10860                 state->async_update =
10861                         !drm_atomic_helper_async_check(dev, state);
10862
10863                 /*
10864                  * Skip the remaining global validation if this is an async
10865                  * update. Cursor updates can be done without affecting
10866                  * state or bandwidth calcs and this avoids the performance
10867                  * penalty of locking the private state object and
10868                  * allocating a new dc_state.
10869                  */
10870                 if (state->async_update)
10871                         return 0;
10872         }
10873
10874         /* Check scaling and underscan changes */
10875         /* TODO: Scaling-change validation was removed because a new stream
10876          * cannot currently be committed into the context without causing a
10877          * full reset. Need to decide how to handle this.
10878          */
10879         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10880                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10881                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10882                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10883
10884                 /* Skip any modesets/resets */
10885                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10886                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10887                         continue;
10888
10889                 /* Skip anything that is not a scaling or underscan change */
10890                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10891                         continue;
10892
10893                 lock_and_validation_needed = true;
10894         }
10895
10896 #if defined(CONFIG_DRM_AMD_DC_DCN)
10897         /* set the slot info for each mst_state based on the link encoding format */
10898         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10899                 struct amdgpu_dm_connector *aconnector;
10900                 struct drm_connector *connector;
10901                 struct drm_connector_list_iter iter;
10902                 u8 link_coding_cap;
10903
10904                 if (!mgr->mst_state)
10905                         continue;
10906
10907                 drm_connector_list_iter_begin(dev, &iter);
10908                 drm_for_each_connector_iter(connector, &iter) {
10909                         int id = connector->index;
10910
10911                         if (id == mst_state->mgr->conn_base_id) {
10912                                 aconnector = to_amdgpu_dm_connector(connector);
10913                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10914                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10915
10916                                 break;
10917                         }
10918                 }
10919                 drm_connector_list_iter_end(&iter);
10920
10921         }
10922 #endif
10923         /*
10924          * Streams and planes are reset when there are changes that affect
10925          * bandwidth. Anything that affects bandwidth needs to go through
10926          * DC global validation to ensure that the configuration can be applied
10927          * to hardware.
10928          *
10929          * We have to currently stall out here in atomic_check for outstanding
10930          * commits to finish in this case because our IRQ handlers reference
10931          * DRM state directly - we can end up disabling interrupts too early
10932          * if we don't.
10933          *
10934          * TODO: Remove this stall and drop DM state private objects.
10935          */
10936         if (lock_and_validation_needed) {
10937                 ret = dm_atomic_get_state(state, &dm_state);
10938                 if (ret)
10939                         goto fail;
10940
10941                 ret = do_aquire_global_lock(dev, state);
10942                 if (ret)
10943                         goto fail;
10944
10945 #if defined(CONFIG_DRM_AMD_DC_DCN)
10946                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
10947                         ret = -EINVAL;
10948                         goto fail; }
10949                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10950                 if (ret)
10951                         goto fail;
10952 #endif
10953
10954                 /*
10955                  * Perform validation of the MST topology in the state:
10956                  * the MST atomic check must run before calling
10957                  * dc_validate_global_state(), or there is a chance
10958                  * of getting stuck in an infinite loop and eventually hanging.
10959                  */
10960                 ret = drm_dp_mst_atomic_check(state);
10961                 if (ret)
10962                         goto fail;
10963                 status = dc_validate_global_state(dc, dm_state->context, false);
10964                 if (status != DC_OK) {
10965                         drm_dbg_atomic(dev,
10966                                        "DC global validation failure: %s (%d)",
10967                                        dc_status_to_str(status), status);
10968                         ret = -EINVAL;
10969                         goto fail;
10970                 }
10971         } else {
10972                 /*
10973                  * The commit is a fast update. Fast updates shouldn't change
10974                  * the DC context, affect global validation, and can have their
10975                  * commit work done in parallel with other commits not touching
10976                  * the same resource. If we have a new DC context as part of
10977                  * the DM atomic state from validation we need to free it and
10978                  * retain the existing one instead.
10979                  *
10980                  * Furthermore, since the DM atomic state only contains the DC
10981                  * context and can safely be annulled, we can free the state
10982                  * and clear the associated private object now to free
10983                  * some memory and avoid a possible use-after-free later.
10984                  */
10985
10986                 for (i = 0; i < state->num_private_objs; i++) {
10987                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10988
10989                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10990                                 int j = state->num_private_objs-1;
10991
10992                                 dm_atomic_destroy_state(obj,
10993                                                 state->private_objs[i].state);
10994
10995                                 /* If i is not at the end of the array then the
10996                                  * last element needs to be moved to where i was
10997                                  * before the array can safely be truncated.
10998                                  */
10999                                 if (i != j)
11000                                         state->private_objs[i] =
11001                                                 state->private_objs[j];
11002
11003                                 state->private_objs[j].ptr = NULL;
11004                                 state->private_objs[j].state = NULL;
11005                                 state->private_objs[j].old_state = NULL;
11006                                 state->private_objs[j].new_state = NULL;
11007
11008                                 state->num_private_objs = j;
11009                                 break;
11010                         }
11011                 }
11012         }
11013
11014         /* Store the overall update type determined here for use later during commit. */
11015         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11016                 struct dm_crtc_state *dm_new_crtc_state =
11017                         to_dm_crtc_state(new_crtc_state);
11018
11019                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11020                                                          UPDATE_TYPE_FULL :
11021                                                          UPDATE_TYPE_FAST;
11022         }
11023
11024         /* Must be success */
11025         WARN_ON(ret);
11026
11027         trace_amdgpu_dm_atomic_check_finish(state, ret);
11028
11029         return ret;
11030
11031 fail:
11032         if (ret == -EDEADLK)
11033                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11034         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11035                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11036         else
11037                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11038
11039         trace_amdgpu_dm_atomic_check_finish(state, ret);
11040
11041         return ret;
11042 }
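
The fast-update branch above drops the DM private object from state->private_objs with the classic unordered-removal idiom: move the last element into slot i, clear the vacated tail slot, and shrink the count, avoiding a memmove of the whole tail. A generic sketch of the same idiom over a plain int array:

/* Remove element i without preserving order; O(1) instead of O(n). */
static void remove_unordered(int *arr, int *len, int i)
{
        int j = *len - 1;

        if (i != j)
                arr[i] = arr[j];  /* fill the hole with the last element */
        arr[j] = 0;               /* clear the vacated tail slot */
        *len = j;
}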
11043
11044 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11045                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11046 {
11047         uint8_t dpcd_data;
11048         bool capable = false;
11049
11050         if (amdgpu_dm_connector->dc_link &&
11051                 dm_helpers_dp_read_dpcd(
11052                                 NULL,
11053                                 amdgpu_dm_connector->dc_link,
11054                                 DP_DOWN_STREAM_PORT_COUNT,
11055                                 &dpcd_data,
11056                                 sizeof(dpcd_data))) {
11057                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11058         }
11059
11060         return capable;
11061 }
11062
11063 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11064                 unsigned int offset,
11065                 unsigned int total_length,
11066                 uint8_t *data,
11067                 unsigned int length,
11068                 struct amdgpu_hdmi_vsdb_info *vsdb)
11069 {
11070         bool res;
11071         union dmub_rb_cmd cmd;
11072         struct dmub_cmd_send_edid_cea *input;
11073         struct dmub_cmd_edid_cea_output *output;
11074
11075         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11076                 return false;
11077
11078         memset(&cmd, 0, sizeof(cmd));
11079
11080         input = &cmd.edid_cea.data.input;
11081
11082         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11083         cmd.edid_cea.header.sub_type = 0;
11084         cmd.edid_cea.header.payload_bytes =
11085                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11086         input->offset = offset;
11087         input->length = length;
11088         input->total_length = total_length;
11089         memcpy(input->payload, data, length);
11090
11091         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11092         if (!res) {
11093                 DRM_ERROR("EDID CEA parser failed\n");
11094                 return false;
11095         }
11096
11097         output = &cmd.edid_cea.data.output;
11098
11099         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11100                 if (!output->ack.success) {
11101                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11102                                         output->ack.offset);
11103                 }
11104         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11105                 if (!output->amd_vsdb.vsdb_found)
11106                         return false;
11107
11108                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11109                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11110                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11111                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11112         } else {
11113                 DRM_WARN("Unknown EDID CEA parser results\n");
11114                 return false;
11115         }
11116
11117         return true;
11118 }
11119
11120 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11121                 uint8_t *edid_ext, int len,
11122                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11123 {
11124         int i;
11125
11126         /* send extension block to DMCU for parsing */
11127         for (i = 0; i < len; i += 8) {
11128                 bool res;
11129                 int offset;
11130
11131                 /* send 8 bytes at a time */
11132                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11133                         return false;
11134
11135                 if (i+8 == len) {
11136                         /* entire EDID block sent, expect result */
11137                         int version, min_rate, max_rate;
11138
11139                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11140                         if (res) {
11141                                 /* amd vsdb found */
11142                                 vsdb_info->freesync_supported = 1;
11143                                 vsdb_info->amd_vsdb_version = version;
11144                                 vsdb_info->min_refresh_rate_hz = min_rate;
11145                                 vsdb_info->max_refresh_rate_hz = max_rate;
11146                                 return true;
11147                         }
11148                         /* not amd vsdb */
11149                         return false;
11150                 }
11151
11152                 /* check for ack */
11153                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11154                 if (!res)
11155                         return false;
11156         }
11157
11158         return false;
11159 }
11160
11161 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11162                 uint8_t *edid_ext, int len,
11163                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11164 {
11165         int i;
11166
11167         /* send extension block to DMUB for parsing */
11168         for (i = 0; i < len; i += 8) {
11169                 /* send 8 bytes at a time */
11170                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11171                         return false;
11172         }
11173
11174         return vsdb_info->freesync_supported;
11175 }
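
Both parse paths above stream the CEA extension block to firmware with the same chunking protocol: 8 bytes per command, with the chunk where i + 8 == len marking the end of the block, after which the final result is expected. A sketch of that loop, assuming len is a multiple of 8 (EDID_LENGTH is 128) and with send_chunk() as a hypothetical stand-in for the dc_edid_parser_send_cea()/dm_edid_parser_send_cea() helpers:

#include <stdbool.h>
#include <stdint.h>

static bool stream_in_chunks(const uint8_t *buf, int len,
                             bool (*send_chunk)(int offset, const uint8_t *data,
                                                bool last))
{
        int i;

        for (i = 0; i < len; i += 8) {
                /* The callee treats the last chunk as "now report results". */
                if (!send_chunk(i, &buf[i], i + 8 == len))
                        return false;
        }
        return true;
}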
11176
11177 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11178                 uint8_t *edid_ext, int len,
11179                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11180 {
11181         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11182
11183         if (adev->dm.dmub_srv)
11184                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11185         else
11186                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11187 }
11188
11189 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11190                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11191 {
11192         uint8_t *edid_ext = NULL;
11193         int i;
11194         bool valid_vsdb_found = false;
11195
11196         /*----- drm_find_cea_extension() -----*/
11197         /* No EDID or EDID extensions */
11198         if (edid == NULL || edid->extensions == 0)
11199                 return -ENODEV;
11200
11201         /* Find CEA extension */
11202         for (i = 0; i < edid->extensions; i++) {
11203                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11204                 if (edid_ext[0] == CEA_EXT)
11205                         break;
11206         }
11207
11208         if (i == edid->extensions)
11209                 return -ENODEV;
11210
11211         /*----- cea_db_offsets() -----*/
11212         if (edid_ext[0] != CEA_EXT)
11213                 return -ENODEV;
11214
11215         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11216
11217         return valid_vsdb_found ? i : -ENODEV;
11218 }
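
The extension walk above relies on the fixed EDID layout: the base block is EDID_LENGTH (128) bytes, extension block i + 1 starts at offset 128 * (i + 1), and byte 0 of a CEA-861 extension carries the tag CEA_EXT (0x02). A standalone sketch of the same walk, with the constants restated locally:

#include <stddef.h>
#include <stdint.h>

#define SK_EDID_LENGTH 128   /* bytes per EDID block */
#define SK_CEA_EXT     0x02  /* CEA-861 extension tag */

static const uint8_t *find_cea_ext(const uint8_t *edid, int num_extensions)
{
        int i;

        for (i = 0; i < num_extensions; i++) {
                /* Extension blocks follow the 128-byte base block. */
                const uint8_t *ext = edid + SK_EDID_LENGTH * (i + 1);

                if (ext[0] == SK_CEA_EXT)
                        return ext;
        }
        return NULL;
}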
11219
11220 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11221                                         struct edid *edid)
11222 {
11223         int i = 0;
11224         struct detailed_timing *timing;
11225         struct detailed_non_pixel *data;
11226         struct detailed_data_monitor_range *range;
11227         struct amdgpu_dm_connector *amdgpu_dm_connector =
11228                         to_amdgpu_dm_connector(connector);
11229         struct dm_connector_state *dm_con_state = NULL;
11230         struct dc_sink *sink;
11231
11232         struct drm_device *dev = connector->dev;
11233         struct amdgpu_device *adev = drm_to_adev(dev);
11234         bool freesync_capable = false;
11235         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11236
11237         if (!connector->state) {
11238                 DRM_ERROR("%s - Connector has no state\n", __func__);
11239                 goto update;
11240         }
11241
11242         sink = amdgpu_dm_connector->dc_sink ?
11243                 amdgpu_dm_connector->dc_sink :
11244                 amdgpu_dm_connector->dc_em_sink;
11245
11246         if (!edid || !sink) {
11247                 dm_con_state = to_dm_connector_state(connector->state);
11248
11249                 amdgpu_dm_connector->min_vfreq = 0;
11250                 amdgpu_dm_connector->max_vfreq = 0;
11251                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11252                 connector->display_info.monitor_range.min_vfreq = 0;
11253                 connector->display_info.monitor_range.max_vfreq = 0;
11254                 freesync_capable = false;
11255
11256                 goto update;
11257         }
11258
11259         dm_con_state = to_dm_connector_state(connector->state);
11260
11261         if (!adev->dm.freesync_module)
11262                 goto update;
11263
11264
11265         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11266                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11267                 bool edid_check_required = false;
11268
11269                 if (edid) {
11270                         edid_check_required = is_dp_capable_without_timing_msa(
11271                                                 adev->dm.dc,
11272                                                 amdgpu_dm_connector);
11273                 }
11274
11275                 if (edid_check_required && (edid->version > 1 ||
11276                    (edid->version == 1 && edid->revision > 1))) {
11277                         for (i = 0; i < 4; i++) {
11278
11279                                 timing  = &edid->detailed_timings[i];
11280                                 data    = &timing->data.other_data;
11281                                 range   = &data->data.range;
11282                                 /*
11283                                  * Check if monitor has continuous frequency mode
11284                                  */
11285                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11286                                         continue;
11287                                 /*
11288                                  * Check for the range-limits flag only: if flags == 1,
11289                                  * no additional timing information is provided.
11290                                  * Default GTF, GTF secondary curve and CVT are not
11291                                  * supported.
11292                                  */
11293                                 if (range->flags != 1)
11294                                         continue;
11295
11296                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11297                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11298                                 amdgpu_dm_connector->pixel_clock_mhz =
11299                                         range->pixel_clock_mhz * 10;
11300
11301                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11302                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11303
11304                                 break;
11305                         }
11306
11307                         if (amdgpu_dm_connector->max_vfreq -
11308                             amdgpu_dm_connector->min_vfreq > 10) {
11309
11310                                 freesync_capable = true;
11311                         }
11312                 }
11313         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11314                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11315                 if (i >= 0 && vsdb_info.freesync_supported) {
11316                         timing  = &edid->detailed_timings[i];
11317                         data    = &timing->data.other_data;
11318
11319                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11320                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11321                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11322                                 freesync_capable = true;
11323
11324                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11325                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11326                 }
11327         }
11328
11329 update:
11330         if (dm_con_state)
11331                 dm_con_state->freesync_capable = freesync_capable;
11332
11333         if (connector->vrr_capable_property)
11334                 drm_connector_set_vrr_capable_property(connector,
11335                                                        freesync_capable);
11336 }
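
Both the DisplayPort and HDMI branches above apply the same capability rule: FreeSync is only reported when max_vfreq - min_vfreq exceeds 10, presumably because a narrower refresh window leaves no useful room for variable-refresh timing. Factored out as a sketch:

#include <stdbool.h>

/* Mirrors the > 10 Hz window checks in amdgpu_dm_update_freesync_caps(). */
static bool vrr_window_usable(int min_vfreq_hz, int max_vfreq_hz)
{
        return max_vfreq_hz - min_vfreq_hz > 10;
}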
11337
11338 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11339 {
11340         struct amdgpu_device *adev = drm_to_adev(dev);
11341         struct dc *dc = adev->dm.dc;
11342         int i;
11343
11344         mutex_lock(&adev->dm.dc_lock);
11345         if (dc->current_state) {
11346                 for (i = 0; i < dc->current_state->stream_count; ++i)
11347                         dc->current_state->streams[i]
11348                                 ->triggered_crtc_reset.enabled =
11349                                 adev->dm.force_timing_sync;
11350
11351                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11352                 dc_trigger_sync(dc, dc->current_state);
11353         }
11354         mutex_unlock(&adev->dm.dc_lock);
11355 }
11356
11357 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11358                        uint32_t value, const char *func_name)
11359 {
11360 #ifdef DM_CHECK_ADDR_0
11361         if (address == 0) {
11362                 DC_ERR("invalid register write; address = 0\n");
11363                 return;
11364         }
11365 #endif
11366         cgs_write_register(ctx->cgs_device, address, value);
11367         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11368 }
11369
11370 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11371                           const char *func_name)
11372 {
11373         uint32_t value;
11374 #ifdef DM_CHECK_ADDR_0
11375         if (address == 0) {
11376                 DC_ERR("invalid register read; address = 0\n");
11377                 return 0;
11378         }
11379 #endif
11380
11381         if (ctx->dmub_srv &&
11382             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11383             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11384                 ASSERT(false);
11385                 return 0;
11386         }
11387
11388         value = cgs_read_register(ctx->cgs_device, address);
11389
11390         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11391
11392         return value;
11393 }
11394
11395 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11396         uint8_t status_type, uint32_t *operation_result)
11397 {
11398         struct amdgpu_device *adev = ctx->driver_context;
11399         int return_status = -1;
11400         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11401
11402         if (is_cmd_aux) {
11403                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11404                         return_status = p_notify->aux_reply.length;
11405                         *operation_result = p_notify->result;
11406                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11407                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11408                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11409                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11410                 } else {
11411                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11412                 }
11413         } else {
11414                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11415                         return_status = 0;
11416                         *operation_result = p_notify->sc_status;
11417                 } else {
11418                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11419                 }
11420         }
11421
11422         return return_status;
11423 }
11424
11425 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11426         unsigned int link_index, void *cmd_payload, void *operation_result)
11427 {
11428         struct amdgpu_device *adev = ctx->driver_context;
11429         int ret = 0;
11430
11431         if (is_cmd_aux) {
11432                 dc_process_dmub_aux_transfer_async(ctx->dc,
11433                         link_index, (struct aux_payload *)cmd_payload);
11434         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11435                                         (struct set_config_cmd_payload *)cmd_payload,
11436                                         adev->dm.dmub_notify)) {
11437                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11438                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11439                                         (uint32_t *)operation_result);
11440         }
11441
11442         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11443         if (ret == 0) {
11444                 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11445                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11446                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11447                                 (uint32_t *)operation_result);
11448         }
11449
11450         if (is_cmd_aux) {
11451                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11452                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11453
11454                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11455                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11456                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11457                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11458                                        adev->dm.dmub_notify->aux_reply.length);
11459                         }
11460                 }
11461         }
11462
11463         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11464                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11465                         (uint32_t *)operation_result);
11466 }
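
The function above wraps an asynchronous DMUB request in a synchronous interface: fire the request, block on adev->dm.dmub_aux_transfer_done with a ten-second timeout, then translate the outcome into a status code. A minimal kernel-C sketch of that completion pattern, with hypothetical my_* names (in the driver, the DMUB notification path is what signals the completion):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_dev {
        struct completion done;
};

/* Hypothetical async kick-off; the notify/IRQ path calls complete(&done). */
static void my_fire_request(struct my_dev *dev) { }

static int my_request_sync(struct my_dev *dev)
{
        reinit_completion(&dev->done);
        my_fire_request(dev);

        /* wait_for_completion_timeout() returns 0 when the timeout elapses. */
        if (!wait_for_completion_timeout(&dev->done, 10 * HZ))
                return -ETIMEDOUT;
        return 0;
}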